] = ELLIPSE(X, Y, Rx, Ry, C)
+%
+% Inputs :
+% X : N x 1 vector of x coordinates
+% Y : N x 1 vector of y coordinates
+% Rx, Ry : Radii
+% C : Color index
+%
+%
+% Outputs :
+% P = Handles of Ellipse shaped path objects
+%
+% Usage Example : [] = ellipse();
+%
+%
+% Note :
+% See also
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 27-May-1998 9:55 AM ATC Created under MATLAB 5.1.0.421
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+if (nargin < 2) error('Usage Example : e = ellipse([0 1],[0 -1],[1 0.5],[2 0.5]); '); end;
+if (nargin < 3) rx = 0.1; end;
+if (nargin < 4) ry = rx; end;
+if (nargin < 5) c = 1; end;
+
+if length(c)==1, c = ones(size(x)).*c; end;
+if length(rx)==1, rx = ones(size(x)).*rx; end;
+if length(ry)==1, ry = ones(size(x)).*ry; end;
+
+n = length(x);
+p = zeros(size(x));
+t = 0:pi/30:2*pi;
+for i=1:n,
+ px = rx(i)*cos(t)+x(i);
+ py = ry(i)*sin(t)+y(i);
+ p(i) = patch(px,py,c(i));
+end;
+
+if nargout>0, pp = p; end;
+
+%%%%%
+
+function [t, wd] = textbox(x,y,str)
+% TEXTBOX Draws a box around the text
+%
+% [T, WIDTH] = TEXTBOX(X, Y, STR)
+% [..] = TEXTBOX(STR)
+%
+% Inputs :
+% X, Y : Coordinates of the box centers
+% STR : Cell array of strings (a char array is converted with cellstr)
+%
+% Outputs :
+% T : Object Handles, one row [text_handle patch_handle] per label
+% WIDTH : x and y half-widths of each box (one row per label)
+%%
+% Usage Example : t = textbox({'Ali','Veli','49','50'});
+%
+% With a single input argument the box centers are picked interactively
+% with the mouse (one ginput click per string).
+%
+% Note :
+% See also TEXTOVAL
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 09-Jun-1998 11:43 AM ATC Created under MATLAB 5.1.0.421
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+% See
+% temp accumulates [text_handle patch_handle] rows across all labels.
+temp = [];
+
+switch nargin,
+ case 1,
+ % Interactive mode: the single argument is the string list.
+ str = x;
+ if ~isa(str,'cell') str=cellstr(str); end;
+ N = length(str);
+ wd = zeros(N,2);
+ for i=1:N,
+ % Mouse click chooses this label's center.
+ [x, y] = ginput(1);
+ % Draw the text once so draw_box can measure its Extent, then delete
+ % and redraw it so the text ends up on top of the patch.
+ tx = text(x,y,str{i},'HorizontalAlignment','center','VerticalAlign','middle');
+ [ptc wx wy] = draw_box(tx, x, y);
+ wd(i,:) = [wx wy];
+ delete(tx);
+ tx = text(x,y,str{i},'HorizontalAlignment','center','VerticalAlign','middle');
+ temp = [temp; tx ptc];
+ end;
+ case 3,
+ % Programmatic mode: one box per (x(i), y(i)) coordinate pair.
+ if ~isa(str,'cell') str=cellstr(str); end;
+ N = length(str);
+ for i=1:N,
+ % Same measure/delete/redraw trick as the interactive branch.
+ tx = text(x(i),y(i),str{i},'HorizontalAlignment','center','VerticalAlign','middle');
+ [ptc wx wy] = draw_box(tx, x(i), y(i));
+ wd(i,:) = [wx wy];
+ delete(tx);
+ tx = text(x(i),y(i),str{i},'HorizontalAlignment','center','VerticalAlign','middle');
+ temp = [temp; tx ptc];
+ end;
+
+ otherwise,
+
+end;
+
+if nargout>0, t = temp; end;
+
+
+function [ptc, wx, wy] = draw_box(tx, x, y)
+% DRAW_BOX Draw a white rectangular patch sized to a text object's extent.
+% Returns the patch handle and the half-widths (wx, wy) of the box.
+ ext = get(tx,'Extent');
+ % Half-height is 2/3 of the text extent height; the half-width is at
+ % least as large as the half-height so short strings still get a box.
+ wy = (2/3)*ext(4);
+ wx = max((2/3)*ext(3), wy);
+ xcorners = [x-wx x+wx x+wx x-wx];
+ ycorners = [y+wy y+wy y-wy y-wy];
+ ptc = patch(xcorners, ycorners, 'w');
+ set(ptc, 'FaceColor','w');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/Old/graphToDot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/graphToDot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,84 @@
+function graphToDot(adj, varargin)
+% GRAPHTODOT Makes a GraphViz (AT&T) file representing an adjacency matrix
+% function graphToDot(adj, ...)
+% Optional arguments should be passed as name/value pairs [default]
+%
+% 'filename' - if omitted, writes to 'tmp.dot'
+% 'arc_label' - arc_label{i,j} is a string attached to the i-j arc [""]
+% 'node_label' - node_label{i} is a string attached to the node i ["i"]
+% 'width' - width in inches [10]
+% 'height' - height in inches [10]
+% 'leftright' - 1 means layout left-to-right, 0 means top-to-bottom [0]
+% 'directed' - 1 means use directed arcs, 0 means undirected [1]
+%
+% For details on graphviz, See http://www.research.att.com/sw/tools/graphviz
+%
+% See also dot_to_graph and draw_dot
+%
+% First version written by Kevin Murphy 2002.
+% Modified by Leon Peshkin, Jan 2004.
+
+node_label = []; arc_label = []; % set default args
+width = 10; height = 10;
+leftright = 0; directed = 1; filename = 'tmp.dot';
+
+for i = 1:2:nargin-1 % get optional args
+ switch varargin{i}
+ case 'filename', filename = varargin{i+1};
+ case 'node_label', node_label = varargin{i+1};
+ case 'arc_label', arc_label = varargin{i+1};
+ case 'width', width = varargin{i+1};
+ case 'height', height = varargin{i+1};
+ case 'leftright', leftright = varargin{i+1};
+ case 'directed', directed = varargin{i+1};
+ end
+end
+
+fid = fopen(filename, 'w');
+if directed
+ fprintf(fid, 'digraph G {\n');
+ arctxt = '->';
+ if isempty(arc_label)
+ labeltxt = '';
+ else
+ labeltxt = '[label="%s"]';
+ end
+else
+ fprintf(fid, 'graph G {\n');
+ arctxt = '--';
+ if isempty(arc_label)
+ labeltxt = '[dir=none]';
+ else
+ % BUG FIX: this was assigned to the misspelled name 'labeltext',
+ % leaving 'labeltxt' undefined for the undirected+labels case.
+ labeltxt = '[label="%s",dir=none]';
+ end
+end
+edgeformat = ['%d ', arctxt, ' %d ', labeltxt, ';\n'];
+fprintf(fid, 'center = 1;\n');
+fprintf(fid, 'size=\"%d,%d\";\n', width, height);
+if leftright
+ fprintf(fid, 'rankdir=LR;\n');
+end
+Nnds = length(adj);
+for node = 1:Nnds % process nodes
+ if isempty(node_label)
+ fprintf(fid, '%d;\n', node);
+ else
+ % BUG FIX: this statement was split over two lines without a '...'
+ % continuation, which is a syntax error in MATLAB.
+ fprintf(fid, '%d [ label = "%s" ];\n', node, node_label{node});
+ end
+end
+for node1 = 1:Nnds % process edges
+ if directed
+ arcs = find(adj(node1,:)); % children(adj, node);
+ else
+ % Only scan the upper triangle to avoid duplicate arcs.
+ % BUG FIX: find returns indices relative to the subvector
+ % adj(node1,node1+1:Nnds), so shift by node1 to get node numbers.
+ arcs = find(adj(node1,node1+1:Nnds)) + node1;
+ end
+ for node2 = arcs
+ if isempty(arc_label)
+ fprintf(fid, edgeformat, node1, node2);
+ else
+ % BUG FIX: the label string must be supplied for the %s
+ % placeholder in edgeformat; it was previously omitted.
+ fprintf(fid, edgeformat, node1, node2, arc_label{node1,node2});
+ end
+ end
+end
+fprintf(fid, '}');
+fclose(fid);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/Old/pre_pesha_graph_to_dot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/pre_pesha_graph_to_dot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,166 @@
+function graph_to_dot(G, varargin)
+% GRAPH_TO_DOT Make a file representing the graph G in dotty (GraphViz) format.
+% graph_to_dot(G, ...)
+%
+% Optional arguments should be passed as name/value pairs [default]
+%
+% 'filename' - if omitted, we write to 'tmp.dot', convert this to 'tmp.ps',
+% and then call ghostview automatically
+% 'arc_label' - arc_label{i,j} is a string attached to the i->j arc. [""]
+% Pass the string 'numbers' to label each arc with the value of G(i,j).
+% 'node_label' - node_label{i} is a string attached to node i. ["i"]
+% 'width' - width in inches [10]
+% 'height' - height in inches [10]
+% 'leftright' - 1 means layout left-to-right, 0 means top-to-bottom [0]
+% 'directed' - 1 means use directed arcs, 0 means undirected [1]
+%
+% For details on dotty, See http://www.research.att.com/sw/tools/graphviz
+%
+% Example:
+% G = rand(5,5);
+% names = cell(5,5);
+% names{1,2} = 'arc 1-2';
+% graph_to_dot(G, 'arc_label', names)
+% or graph_to_dot(G, 'arc_label', 'numbers') % prints value of G(i,j) on i->j arc
+
+% Kevin Murphy, 1998
+
+% set default args
+filename = [];
+node_label = [];
+arc_label = [];
+width = 10;
+height = 10;
+leftright = 0;
+directed = 1;
+% get optional args (name/value pairs from varargin)
+args = varargin;
+for i=1:2:length(args)
+ switch args{i}
+ case 'filename', filename = args{i+1};
+ case 'node_label', node_label = args{i+1};
+ case 'arc_label', arc_label = args{i+1};
+ case 'width', width = args{i+1};
+ case 'height', height = args{i+1};
+ case 'leftright', leftright = args{i+1};
+ case 'directed', directed = args{i+1};
+ end
+end
+
+% 'numbers' is a magic value: expand it into a cell array of the
+% formatted entries of G so each arc i->j is labelled with G(i,j).
+if isstr(arc_label) & strcmp(arc_label, 'numbers')
+ N = length(G);
+ arc_label = cell(N,N);
+ for i=1:N
+ for j=1:N
+ arc_label{i,j} = sprintf('%4.2f', G(i,j));
+ end
+ end
+end
+
+if isempty(filename)
+ % No filename given: write tmp.dot, render it to PostScript with dot,
+ % and launch a ghostscript viewer on the result.
+ make_file(G, 'tmp.dot', node_label, arc_label, width, height, leftright, directed);
+ if isunix
+ !dot -Tps tmp.dot -o tmp.ps
+
+ !gs tmp.ps &
+ else
+ dos('dot -Tps tmp.dot -o tmp.ps');
+ dos('gsview32 tmp.ps &');
+ end
+else
+
+
+ make_file(G, filename, node_label, arc_label, width, height, leftright, directed);
+end
+
+
+%%%%%%
+
+function make_file(G, filename, node_label, arc_label, width, height, leftright, directed)
+% MAKE_FILE Write graph G to FILENAME in dot (GraphViz) syntax.
+% G : adjacency matrix; node_label/arc_label : cell arrays or [];
+% width/height : page size in inches; leftright : 1 for LR rank direction;
+% directed : 1 emits a digraph with '->' arcs, 0 an undirected graph.
+
+n = length(G);
+fid = fopen(filename, 'w');
+if directed
+ fprintf(fid, 'digraph G {\n');
+else
+ fprintf(fid, 'graph G {\n');
+end
+fprintf(fid, 'center = 1;\n');
+fprintf(fid, 'size=\"%d,%d\";\n', width, height);
+if leftright
+ fprintf(fid, 'rankdir=LR;\n');
+end
+% Emit one statement per node, optionally with a label attribute.
+for i=1:n
+ if isempty(node_label)
+ fprintf(fid, '%d;\n', i);
+ else
+ fprintf(fid, '%d [ label = "%s" ];\n', i, node_label{i});
+ end
+end
+if directed
+ for i=1:n
+ cs = children(G,i);
+ for j=1:length(cs)
+ c = cs(j);
+ if isempty(arc_label)
+ fprintf(fid, '%d -> %d;\n', i, c);
+ else
+ fprintf(fid, '%d -> %d [label="%s"];\n', i, c, arc_label{i,c});
+ end
+ end
+ end
+else
+ % Undirected: only emit each arc once by restricting to neighbors > i.
+ for i=1:n
+ ns = intersect(neighbors(G,i), i+1:n); % remove duplicate arcs
+ for j=1:length(ns)
+ c = ns(j);
+ if isempty(arc_label)
+ fprintf(fid, '%d -- %d [dir=none];\n', i, c);
+ else
+ fprintf(fid, '%d -- %d [label="%s",dir=none];\n', i, c, arc_label{i,c});
+ end
+ end
+ end
+end
+fprintf(fid, '\n}');
+fclose(fid);
+
+
+
+%%%%%%%%%%%%%%%
+
+function cs = children(adj_mat, i, t)
+% CHILDREN Return the indices of a node's children in sorted order
+% cs = children(adj_mat, i, t)
+%
+% t is an optional argument: if present, dag is assumed to be a 2-slice DBN
+
+if nargin < 3
+ t = 1; % default: treat as a static graph (same as t==1)
+end
+if t == 1
+ cs = find(adj_mat(i,:));
+else
+ % 2-slice DBN: node i in slice t corresponds to row i+slicesz of the
+ % 2-slice adjacency matrix; shift the results into slice t's numbering.
+ slicesz = length(adj_mat)/2;
+ cs = find(adj_mat(i+slicesz,:)) + (t-2)*slicesz;
+end
+
+%%%%%%%%%%%%
+
+function ps = parents(adj_mat, i)
+% PARENTS Return the list of parents of node i
+% ps = parents(adj_mat, i)
+
+% A parent of i is any node j with an arc j->i, i.e. a nonzero entry in
+% column i; transpose the column first so find yields a row vector.
+ps = find(adj_mat(:,i)');
+
+%%%%%%%%%%%%%
+
+function ns = neighbors(adj_mat, i)
+% NEIGHBORS Find the parents and children of a node in a graph.
+% ns = neighbors(adj_mat, i)
+
+% Sorted union of the endpoints of incoming and outgoing arcs.
+kids = children(adj_mat, i);
+folks = parents(adj_mat, i);
+ns = union(kids, folks);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+Graph visualization (automatic layout) functions
+------------------------------------------------
+
+This directory contains code to automatically layout and visualize
+graphs. It provides a matlab interface to the graphviz program:
+ http://www.research.att.com/sw/tools/graphviz
+Written by Kevin Murphy, Leon Peshkin, Tom Minka.
+
+draw_graph was written by Ali Taylan Cemgil, and is entirely
+self-contained matlab: it does not need graphviz, but produces lower
+quality results.
+http://www.mbfys.kun.nl/~cemgil/matlab/layout.html
+
+See also the following URLs for other graph layout programs:
+ http://www.ics.uci.edu/~eppstein/gina/gdraw.html
+ http://www.cwi.nl/InfoVisu/
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/approxeq.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/approxeq.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function p = approxeq(a, b, tol, rel)
+% APPROXEQ Are a and b approximately equal (to within a specified tolerance)?
+% p = approxeq(a, b, tol)
+% 'tol' defaults to 1e-2.
+% Returns a single logical: p = 1 iff abs(a(i) - b(i)) <= tol for ALL i
+% (not an elementwise result).
+%
+% p = approxeq(a, b, tol, 1)
+% p = 1 iff abs(a(i)-b(i))/(abs(a(i))+eps) <= tol for all i
+% (eps guards against division by zero when a(i) == 0)
+
+if nargin < 3, tol = 1e-2; end
+if nargin < 4, rel = 0; end
+
+a = a(:);
+b = b(:);
+d = abs(a-b);
+if rel
+ p = ~any( (d ./ (abs(a)+eps)) > tol);
+else
+ p = ~any(d > tol);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/arrow.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/arrow.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1333 @@
+function [h,yy,zz] = arrow(varargin)
+% ARROW Draw a line with an arrowhead.
+%
+% ARROW(Start,Stop) draws a line with an arrow from Start to Stop (points
+% should be vectors of length 2 or 3, or matrices with 2 or 3
+% columns), and returns the graphics handle of the arrow(s).
+%
+% ARROW uses the mouse (click-drag) to create an arrow.
+%
+% ARROW DEMO & ARROW DEMO2 show 3-D & 2-D demos of the capabilities of ARROW.
+%
+% ARROW may be called with a normal argument list or a property-based list.
+% ARROW(Start,Stop,Length,BaseAngle,TipAngle,Width,Page,CrossDir) is
+% the full normal argument list, where all but the Start and Stop
+% points are optional. If you need to specify a later argument (e.g.,
+% Page) but want default values of earlier ones (e.g., TipAngle),
+% pass an empty matrix for the earlier ones (e.g., TipAngle=[]).
+%
+% ARROW('Property1',PropVal1,'Property2',PropVal2,...) creates arrows with the
+% given properties, using default values for any unspecified or given as
+% 'default' or NaN. Some properties used for line and patch objects are
+% used in a modified fashion, others are passed directly to LINE, PATCH,
+% or SET. For a detailed properties explanation, call ARROW PROPERTIES.
+%
+% Start The starting points. B
+% Stop The end points. /|\ ^
+% Length Length of the arrowhead in pixels. /|||\ |
+% BaseAngle Base angle in degrees (ADE). //|||\\ L|
+% TipAngle Tip angle in degrees (ABC). ///|||\\\ e|
+% Width Width of the base in pixels. ////|||\\\\ n|
+% Page Use hardcopy proportions. /////|D|\\\\\ g|
+% CrossDir Vector || to arrowhead plane. //// ||| \\\\ t|
+% NormalDir Vector out of arrowhead plane. /// ||| \\\ h|
+% Ends Which end has an arrowhead. //<----->|| \\ |
+% ObjectHandles Vector of handles to update. / base ||| \ V
+% E angle||<-------->C
+% ARROW(H,'Prop1',PropVal1,...), where H is a |||tipangle
+% vector of handles to previously-created arrows |||
+% and/or line objects, will update the previously- |||
+% created arrows according to the current view -->|A|<-- width
+% and any specified properties, and will convert
+% two-point line objects to corresponding arrows. ARROW(H) will update
+% the arrows if the current view has changed. Root, figure, or axes
+% handles included in H are replaced by all descendant Arrow objects.
+%
+% A property list can follow any specified normal argument list, e.g.,
+% ARROW([1 2 3],[0 0 0],36,'BaseAngle',60) creates an arrow from (1,2,3) to
+% the origin, with an arrowhead of length 36 pixels and 60-degree base angle.
+%
+% The basic arguments or properties can generally be vectorized to create
+% multiple arrows with the same call. This is done by passing a property
+% with one row per arrow, or, if all arrows are to have the same property
+% value, just one row may be specified.
+%
+% You may want to execute AXIS(AXIS) before calling ARROW so it doesn't change
+% the axes on you; ARROW determines the sizes of arrow components BEFORE the
+% arrow is plotted, so if ARROW changes axis limits, arrows may be malformed.
+%
+% This version of ARROW uses features of MATLAB 5 and is incompatible with
+% earlier MATLAB versions (ARROW for MATLAB 4.2c is available separately);
+% some problems with perspective plots still exist.
+
+% Copyright (c)1995-1997, Erik A. Johnson , 8/14/97
+
+% Revision history:
+% 8/14/97 EAJ Added workaround for MATLAB 5.1 scalar logical transpose bug.
+% 7/21/97 EAJ Fixed a few misc bugs.
+% 7/14/97 EAJ Make arrow([],'Prop',...) do nothing (no old handles)
+% 6/23/97 EAJ MATLAB 5 compatible version, release.
+% 5/27/97 EAJ Added Line Arrows back in. Corrected a few bugs.
+% 5/26/97 EAJ Changed missing Start/Stop to mouse-selected arrows.
+% 5/19/97 EAJ MATLAB 5 compatible version, beta.
+% 4/13/97 EAJ MATLAB 5 compatible version, alpha.
+% 1/31/97 EAJ Fixed bug with multiple arrows and unspecified Z coords.
+% 12/05/96 EAJ Fixed one more bug with log plots and NormalDir specified
+% 10/24/96 EAJ Fixed bug with log plots and NormalDir specified
+% 11/13/95 EAJ Corrected handling for 'reverse' axis directions
+% 10/06/95 EAJ Corrected occasional conflict with SUBPLOT
+% 4/24/95 EAJ A major rewrite.
+% Fall 94 EAJ Original code.
+
+% Things to be done:
+% - segment parsing, computing, and plotting into separate subfunctions
+% - change computing from Xform to Camera paradigms
+% + this will help especially with 3-D perspective plots
+% + if the WarpToFill section works right, remove warning code
+% + when perpsective works properly, remove perspective warning code
+% - add cell property values and struct property name/values (like get/set)
+% - get rid of NaN as the "default" data label
+% + perhaps change userdata to a struct and don't include (or leave
+% empty) the values specified as default; or use a cell containing
+% an empty matrix for a default value
+% - add functionality of GET to retrieve current values of ARROW properties
+
+% Many thanks to Keith Rogers for his many excellent
+% suggestions and beta testing. Check out his shareware package MATDRAW.
+% He has permission to distribute ARROW with MATDRAW.
+
+% global variable initialization
+global ARROW_PERSP_WARN ARROW_STRETCH_WARN ARROW_AXLIMITS
+if isempty(ARROW_PERSP_WARN ), ARROW_PERSP_WARN =1; end;
+if isempty(ARROW_STRETCH_WARN), ARROW_STRETCH_WARN=1; end;
+
+% Handle callbacks
+if (nargin>0 & isstr(varargin{1}) & strcmp(lower(varargin{1}),'callback')),
+ arrow_callback(varargin{2:end}); return;
+end;
+
+% Are we doing the demo?
+c = sprintf('\n');
+if (nargin==1 & isstr(varargin{1})),
+ arg1 = lower(varargin{1});
+ if strncmp(arg1,'prop',4), arrow_props;
+ elseif strncmp(arg1,'demo',4)
+ clf reset
+ demo_info = arrow_demo;
+ if ~strncmp(arg1,'demo2',5),
+ hh=arrow_demo3(demo_info);
+ else,
+ hh=arrow_demo2(demo_info);
+ end;
+ if (nargout>=1), h=hh; end;
+ elseif strncmp(arg1,'fixlimits',3),
+ arrow_fixlimits(ARROW_AXLIMITS);
+ ARROW_AXLIMITS=[];
+ elseif strncmp(arg1,'help',4),
+ disp(help(mfilename));
+ else,
+ error([upper(mfilename) ' got an unknown single-argument string ''' deblank(arg1) '''.']);
+ end;
+ return;
+end;
+
+% Check # of arguments
+if (nargout>3), error([upper(mfilename) ' produces at most 3 output arguments.']); end;
+
+% find first property number
+firstprop = nargin+1;
+for k=1:length(varargin), if ~isnumeric(varargin{k}), firstprop=k; break; end; end;
+lastnumeric = firstprop-1;
+
+% check property list
+if (firstprop<=nargin),
+ for k=firstprop:2:nargin,
+ curarg = varargin{k};
+ if ~isstr(curarg) | sum(size(curarg)>1)>1,
+ error([upper(mfilename) ' requires that a property name be a single string.']);
+ end;
+ end;
+ if (rem(nargin-firstprop,2)~=1),
+ error([upper(mfilename) ' requires that the property ''' ...
+ varargin{nargin} ''' be paired with a property value.']);
+ end;
+end;
+
+% default output
+if (nargout>0), h=[]; end;
+if (nargout>1), yy=[]; end;
+if (nargout>2), zz=[]; end;
+
+% set values to empty matrices
+start = [];
+stop = [];
+len = [];
+baseangle = [];
+tipangle = [];
+wid = [];
+page = [];
+crossdir = [];
+ends = [];
+ax = [];
+oldh = [];
+ispatch = [];
+defstart = [NaN NaN NaN];
+defstop = [NaN NaN NaN];
+deflen = 16;
+defbaseangle = 90;
+deftipangle = 16;
+defwid = 0;
+defpage = 0;
+defcrossdir = [NaN NaN NaN];
+defends = 1;
+defoldh = [];
+defispatch = 1;
+
+% The 'Tag' we'll put on our arrows
+ArrowTag = 'Arrow';
+
+% check for oldstyle arguments
+if (firstprop==2),
+ % assume arg1 is a set of handles
+ oldh = varargin{1}(:);
+ if isempty(oldh), return; end;
+elseif (firstprop>9),
+ error([upper(mfilename) ' takes at most 8 non-property arguments.']);
+elseif (firstprop>2),
+ s = str2mat('start','stop','len','baseangle','tipangle','wid','page','crossdir');
+ for k=1:firstprop-1, eval([deblank(s(k,:)) '=varargin{k};']); end;
+end;
+
+% parse property pairs
+extraprops={};
+for k=firstprop:2:nargin,
+ prop = varargin{k};
+ val = varargin{k+1};
+ prop = [lower(prop(:)') ' '];
+ if strncmp(prop,'start' ,5), start = val;
+ elseif strncmp(prop,'stop' ,4), stop = val;
+ elseif strncmp(prop,'len' ,3), len = val(:);
+ elseif strncmp(prop,'base' ,4), baseangle = val(:);
+ elseif strncmp(prop,'tip' ,3), tipangle = val(:);
+ elseif strncmp(prop,'wid' ,3), wid = val(:);
+ elseif strncmp(prop,'page' ,4), page = val;
+ elseif strncmp(prop,'cross' ,5), crossdir = val;
+ elseif strncmp(prop,'norm' ,4), if (isstr(val)), crossdir=val; else, crossdir=val*sqrt(-1); end;
+ elseif strncmp(prop,'end' ,3), ends = val;
+ elseif strncmp(prop,'object',6), oldh = val(:);
+ elseif strncmp(prop,'handle',6), oldh = val(:);
+ elseif strncmp(prop,'type' ,4), ispatch = val;
+ elseif strncmp(prop,'userd' ,5), %ignore it
+ else,
+ % make sure it is a valid patch or line property
+ eval('get(0,[''DefaultPatch'' varargin{k}]);err=0;','err=1;'); errstr=lasterr;
+ if (err), eval('get(0,[''DefaultLine'' varargin{k}]);err=0;','err=1;'); end;
+ if (err),
+ errstr(1:max(find(errstr==setstr(13)|errstr==setstr(10)))) = '';
+ error([upper(mfilename) ' got ' errstr]);
+ end;
+ extraprops={extraprops{:},varargin{k},val};
+ end;
+end;
+
+% Check if we got 'default' values
+start = arrow_defcheck(start ,defstart ,'Start' );
+stop = arrow_defcheck(stop ,defstop ,'Stop' );
+len = arrow_defcheck(len ,deflen ,'Length' );
+baseangle = arrow_defcheck(baseangle,defbaseangle,'BaseAngle' );
+tipangle = arrow_defcheck(tipangle ,deftipangle ,'TipAngle' );
+wid = arrow_defcheck(wid ,defwid ,'Width' );
+crossdir = arrow_defcheck(crossdir ,defcrossdir ,'CrossDir' );
+page = arrow_defcheck(page ,defpage ,'Page' );
+ends = arrow_defcheck(ends ,defends ,'' );
+oldh = arrow_defcheck(oldh ,[] ,'ObjectHandles');
+ispatch = arrow_defcheck(ispatch ,defispatch ,'' );
+
+% check transpose on arguments
+[m,n]=size(start ); if any(m==[2 3])&(n==1|n>3), start = start'; end;
+[m,n]=size(stop ); if any(m==[2 3])&(n==1|n>3), stop = stop'; end;
+[m,n]=size(crossdir); if any(m==[2 3])&(n==1|n>3), crossdir = crossdir'; end;
+
+% convert strings to numbers
+if ~isempty(ends) & isstr(ends),
+ endsorig = ends;
+ [m,n] = size(ends);
+ col = lower([ends(:,1:min(3,n)) ones(m,max(0,3-n))*' ']);
+ ends = NaN*ones(m,1);
+ oo = ones(1,m);
+ ii=find(all(col'==['non']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*0; end;
+ ii=find(all(col'==['sto']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*1; end;
+ ii=find(all(col'==['sta']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*2; end;
+ ii=find(all(col'==['bot']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*3; end;
+ if any(isnan(ends)),
+ ii = min(find(isnan(ends)));
+ error([upper(mfilename) ' does not recognize ' deblank(endsorig(ii,:)) ' as a valid Ends value.']);
+ end;
+else,
+ ends = ends(:);
+end;
+if ~isempty(ispatch) & isstr(ispatch),
+ col = lower(ispatch(:,1));
+ patchchar='p'; linechar='l'; defchar=' ';
+ mask = col~=patchchar & col~=linechar & col~=defchar;
+ if any(mask)
+ error([upper(mfilename) ' does not recognize ' deblank(ispatch(min(find(mask)),:)) ' as a valid Type value.']);
+ end;
+ ispatch = (col==patchchar)*1 + (col==linechar)*0 + (col==defchar)*defispatch;
+else,
+ ispatch = ispatch(:);
+end;
+oldh = oldh(:);
+
+% check object handles
+if ~all(ishandle(oldh)), error([upper(mfilename) ' got invalid object handles.']); end;
+
+% expand root, figure, and axes handles
+if ~isempty(oldh),
+ ohtype = get(oldh,'Type');
+ mask = strcmp(ohtype,'root') | strcmp(ohtype,'figure') | strcmp(ohtype,'axes');
+ if any(mask),
+ oldh = num2cell(oldh);
+ for ii=find(mask)',
+ oldh(ii) = {findobj(oldh{ii},'Tag',ArrowTag)};
+ end;
+ oldh = cat(1,oldh{:});
+ if isempty(oldh), return; end; % no arrows to modify, so just leave
+ end;
+end;
+
+% largest argument length
+[mstart,junk]=size(start); [mstop,junk]=size(stop); [mcrossdir,junk]=size(crossdir);
+argsizes = [length(oldh) mstart mstop ...
+ length(len) length(baseangle) length(tipangle) ...
+ length(wid) length(page) mcrossdir length(ends) ];
+args=['length(ObjectHandle) '; ...
+ '#rows(Start) '; ...
+ '#rows(Stop) '; ...
+ 'length(Length) '; ...
+ 'length(BaseAngle) '; ...
+ 'length(TipAngle) '; ...
+ 'length(Width) '; ...
+ 'length(Page) '; ...
+ '#rows(CrossDir) '; ...
+ '#rows(Ends) '];
+if (any(imag(crossdir(:))~=0)),
+ args(9,:) = '#rows(NormalDir) ';
+end;
+if isempty(oldh),
+ narrows = max(argsizes);
+else,
+ narrows = length(oldh);
+end;
+if (narrows<=0), narrows=1; end;
+
+% Check size of arguments
+ii = find((argsizes~=0)&(argsizes~=1)&(argsizes~=narrows));
+if ~isempty(ii),
+ s = args(ii',:);
+ while ((size(s,2)>1)&((abs(s(:,size(s,2)))==0)|(abs(s(:,size(s,2)))==abs(' ')))),
+ s = s(:,1:size(s,2)-1);
+ end;
+ s = [ones(length(ii),1)*[upper(mfilename) ' requires that '] s ...
+ ones(length(ii),1)*[' equal the # of arrows (' num2str(narrows) ').' c]];
+ s = s';
+ s = s(:)';
+ s = s(1:length(s)-1);
+ error(setstr(s));
+end;
+
+% check element length in Start, Stop, and CrossDir
+if ~isempty(start),
+ [m,n] = size(start);
+ if (n==2),
+ start = [start NaN*ones(m,1)];
+ elseif (n~=3),
+ error([upper(mfilename) ' requires 2- or 3-element Start points.']);
+ end;
+end;
+if ~isempty(stop),
+ [m,n] = size(stop);
+ if (n==2),
+ stop = [stop NaN*ones(m,1)];
+ elseif (n~=3),
+ error([upper(mfilename) ' requires 2- or 3-element Stop points.']);
+ end;
+end;
+if ~isempty(crossdir),
+ [m,n] = size(crossdir);
+ if (n<3),
+ crossdir = [crossdir NaN*ones(m,3-n)];
+ elseif (n~=3),
+ if (all(imag(crossdir(:))==0)),
+ error([upper(mfilename) ' requires 2- or 3-element CrossDir vectors.']);
+ else,
+ error([upper(mfilename) ' requires 2- or 3-element NormalDir vectors.']);
+ end;
+ end;
+end;
+
+% fill empty arguments
+if isempty(start ), start = [Inf Inf Inf]; end;
+if isempty(stop ), stop = [Inf Inf Inf]; end;
+if isempty(len ), len = Inf; end;
+if isempty(baseangle ), baseangle = Inf; end;
+if isempty(tipangle ), tipangle = Inf; end;
+if isempty(wid ), wid = Inf; end;
+if isempty(page ), page = Inf; end;
+if isempty(crossdir ), crossdir = [Inf Inf Inf]; end;
+if isempty(ends ), ends = Inf; end;
+if isempty(ispatch ), ispatch = Inf; end;
+
+% expand single-column arguments
+o = ones(narrows,1);
+if (size(start ,1)==1), start = o * start ; end;
+if (size(stop ,1)==1), stop = o * stop ; end;
+if (length(len )==1), len = o * len ; end;
+if (length(baseangle )==1), baseangle = o * baseangle ; end;
+if (length(tipangle )==1), tipangle = o * tipangle ; end;
+if (length(wid )==1), wid = o * wid ; end;
+if (length(page )==1), page = o * page ; end;
+if (size(crossdir ,1)==1), crossdir = o * crossdir ; end;
+if (length(ends )==1), ends = o * ends ; end;
+if (length(ispatch )==1), ispatch = o * ispatch ; end;
+ax = o * gca;
+
+% if we've got handles, get the defaults from the handles
+if ~isempty(oldh),
+ for k=1:narrows,
+ oh = oldh(k);
+ ud = get(oh,'UserData');
+ ax(k) = get(oh,'Parent');
+ ohtype = get(oh,'Type');
+ if strcmp(get(oh,'Tag'),ArrowTag), % if it's an arrow already
+ if isinf(ispatch(k)), ispatch(k)=strcmp(ohtype,'patch'); end;
+ % arrow UserData format: [start' stop' len base tip wid page crossdir' ends]
+ start0 = ud(1:3);
+ stop0 = ud(4:6);
+ if (isinf(len(k))), len(k) = ud( 7); end;
+ if (isinf(baseangle(k))), baseangle(k) = ud( 8); end;
+ if (isinf(tipangle(k))), tipangle(k) = ud( 9); end;
+ if (isinf(wid(k))), wid(k) = ud(10); end;
+ if (isinf(page(k))), page(k) = ud(11); end;
+ if (isinf(crossdir(k,1))), crossdir(k,1) = ud(12); end;
+ if (isinf(crossdir(k,2))), crossdir(k,2) = ud(13); end;
+ if (isinf(crossdir(k,3))), crossdir(k,3) = ud(14); end;
+ if (isinf(ends(k))), ends(k) = ud(15); end;
+ elseif strcmp(ohtype,'line')|strcmp(ohtype,'patch'), % it's a non-arrow line or patch
+ convLineToPatch = 1; %set to make arrow patches when converting from lines.
+ if isinf(ispatch(k)), ispatch(k)=convLineToPatch|strcmp(ohtype,'patch'); end;
+ x=get(oh,'XData'); x=x(~isnan(x(:))); if isempty(x), x=NaN; end;
+ y=get(oh,'YData'); y=y(~isnan(y(:))); if isempty(y), y=NaN; end;
+ z=get(oh,'ZData'); z=z(~isnan(z(:))); if isempty(z), z=NaN; end;
+ start0 = [x(1) y(1) z(1) ];
+ stop0 = [x(end) y(end) z(end)];
+ else,
+ error([upper(mfilename) ' cannot convert ' ohtype ' objects.']);
+ end;
+ ii=find(isinf(start(k,:))); if ~isempty(ii), start(k,ii)=start0(ii); end;
+ ii=find(isinf(stop( k,:))); if ~isempty(ii), stop( k,ii)=stop0( ii); end;
+ end;
+end;
+
+% convert Inf's to NaN's
+start( isinf(start )) = NaN;
+stop( isinf(stop )) = NaN;
+len( isinf(len )) = NaN;
+baseangle( isinf(baseangle)) = NaN;
+tipangle( isinf(tipangle )) = NaN;
+wid( isinf(wid )) = NaN;
+page( isinf(page )) = NaN;
+crossdir( isinf(crossdir )) = NaN;
+ends( isinf(ends )) = NaN;
+ispatch( isinf(ispatch )) = NaN;
+
+% set up the UserData data (here so not corrupted by log10's and such)
+ud = [start stop len baseangle tipangle wid page crossdir ends];
+
+% Set Page defaults
+%page = (~isnan(page))&(page);
+if isnan(page)
+ page = 0;
+end
+
+% Get axes limits, range, min; correct for aspect ratio and log scale
+axm = zeros(3,narrows);
+axr = zeros(3,narrows);
+axrev = zeros(3,narrows);
+ap = zeros(2,narrows);
+xyzlog = zeros(3,narrows);
+limmin = zeros(2,narrows);
+limrange = zeros(2,narrows);
+oldaxlims = zeros(narrows,7);
+oneax = all(ax==ax(1));
+if (oneax),
+ T = zeros(4,4);
+ invT = zeros(4,4);
+else,
+ T = zeros(16,narrows);
+ invT = zeros(16,narrows);
+end;
+axnotdone = logical(ones(size(ax)));
+while (any(axnotdone)),
+ ii = min(find(axnotdone));
+ curax = ax(ii);
+ curpage = page(ii);
+ % get axes limits and aspect ratio
+ axl = [get(curax,'XLim'); get(curax,'YLim'); get(curax,'ZLim')];
+ oldaxlims(min(find(oldaxlims(:,1)==0)),:) = [curax reshape(axl',1,6)];
+ % get axes size in pixels (points)
+ u = get(curax,'Units');
+ axposoldunits = get(curax,'Position');
+ really_curpage = curpage & strcmp(u,'normalized');
+ if (really_curpage),
+ curfig = get(curax,'Parent');
+ pu = get(curfig,'PaperUnits');
+ set(curfig,'PaperUnits','points');
+ pp = get(curfig,'PaperPosition');
+ set(curfig,'PaperUnits',pu);
+ set(curax,'Units','pixels');
+ curapscreen = get(curax,'Position');
+ set(curax,'Units','normalized');
+ curap = pp.*get(curax,'Position');
+ else,
+ set(curax,'Units','pixels');
+ curapscreen = get(curax,'Position');
+ curap = curapscreen;
+ end;
+ set(curax,'Units',u);
+ set(curax,'Position',axposoldunits);
+ % handle non-stretched axes position
+ str_stretch = { 'DataAspectRatioMode' ; ...
+ 'PlotBoxAspectRatioMode' ; ...
+ 'CameraViewAngleMode' };
+ str_camera = { 'CameraPositionMode' ; ...
+ 'CameraTargetMode' ; ...
+ 'CameraViewAngleMode' ; ...
+ 'CameraUpVectorMode' };
+ notstretched = strcmp(get(curax,str_stretch),'manual');
+ manualcamera = strcmp(get(curax,str_camera),'manual');
+ if ~arrow_WarpToFill(notstretched,manualcamera,curax),
+ % give a warning that this has not been thoroughly tested
+ if 0 & ARROW_STRETCH_WARN,
+ ARROW_STRETCH_WARN = 0;
+ strs = {str_stretch{1:2},str_camera{:}};
+ strs = [char(ones(length(strs),1)*sprintf('\n ')) char(strs)]';
+ warning([upper(mfilename) ' may not yet work quite right ' ...
+ 'if any of the following are ''manual'':' strs(:).']);
+ end;
+ % find the true pixel size of the actual axes
+ texttmp = text(axl(1,[1 2 2 1 1 2 2 1]), ...
+ axl(2,[1 1 2 2 1 1 2 2]), ...
+ axl(3,[1 1 1 1 2 2 2 2]),'');
+ set(texttmp,'Units','points');
+ textpos = get(texttmp,'Position');
+ delete(texttmp);
+ textpos = cat(1,textpos{:});
+ textpos = max(textpos(:,1:2)) - min(textpos(:,1:2));
+ % adjust the axes position
+ if (really_curpage),
+ % adjust to printed size
+ textpos = textpos * min(curap(3:4)./textpos);
+ curap = [curap(1:2)+(curap(3:4)-textpos)/2 textpos];
+ else,
+ % adjust for pixel roundoff
+ textpos = textpos * min(curapscreen(3:4)./textpos);
+ curap = [curap(1:2)+(curap(3:4)-textpos)/2 textpos];
+ end;
+ end;
+ if ARROW_PERSP_WARN & ~strcmp(get(curax,'Projection'),'orthographic'),
+ ARROW_PERSP_WARN = 0;
+ warning([upper(mfilename) ' does not yet work right for 3-D perspective projection.']);
+ end;
+ % adjust limits for log scale on axes
+ curxyzlog = [strcmp(get(curax,'XScale'),'log'); ...
+ strcmp(get(curax,'YScale'),'log'); ...
+ strcmp(get(curax,'ZScale'),'log')];
+ if (any(curxyzlog)),
+ ii = find([curxyzlog;curxyzlog]);
+ if (any(axl(ii)<=0)),
+ error([upper(mfilename) ' does not support non-positive limits on log-scaled axes.']);
+ else,
+ axl(ii) = log10(axl(ii));
+ end;
+ end;
+ % correct for 'reverse' direction on axes;
+ curreverse = [strcmp(get(curax,'XDir'),'reverse'); ...
+ strcmp(get(curax,'YDir'),'reverse'); ...
+ strcmp(get(curax,'ZDir'),'reverse')];
+ ii = find(curreverse);
+ if ~isempty(ii),
+ axl(ii,[1 2])=-axl(ii,[2 1]);
+ end;
+ % compute the range of 2-D values
+ curT = get(curax,'Xform');
+ lim = curT*[0 1 0 1 0 1 0 1;0 0 1 1 0 0 1 1;0 0 0 0 1 1 1 1;1 1 1 1 1 1 1 1];
+ lim = lim(1:2,:)./([1;1]*lim(4,:));
+ curlimmin = min(lim')';
+ curlimrange = max(lim')' - curlimmin;
+ curinvT = inv(curT);
+ if (~oneax),
+ curT = curT.';
+ curinvT = curinvT.';
+ curT = curT(:);
+ curinvT = curinvT(:);
+ end;
+ % check which arrows to which cur corresponds
+ ii = find((ax==curax)&(page==curpage));
+ oo = ones(1,length(ii));
+ axr(:,ii) = diff(axl')' * oo;
+ axm(:,ii) = axl(:,1) * oo;
+ axrev(:,ii) = curreverse * oo;
+ ap(:,ii) = curap(3:4)' * oo;
+ xyzlog(:,ii) = curxyzlog * oo;
+ limmin(:,ii) = curlimmin * oo;
+ limrange(:,ii) = curlimrange * oo;
+ if (oneax),
+ T = curT;
+ invT = curinvT;
+ else,
+ T(:,ii) = curT * oo;
+ invT(:,ii) = curinvT * oo;
+ end;
+ axnotdone(ii) = zeros(1,length(ii));
+end;
+oldaxlims(oldaxlims(:,1)==0,:)=[];
+
+% correct for log scales
+curxyzlog = xyzlog.';
+ii = find(curxyzlog(:));
+if ~isempty(ii),
+ start( ii) = real(log10(start( ii)));
+ stop( ii) = real(log10(stop( ii)));
+ if (all(imag(crossdir)==0)), % pulled (ii) subscript on crossdir, 12/5/96 eaj
+ crossdir(ii) = real(log10(crossdir(ii)));
+ end;
+end;
+
+% correct for reverse directions
+ii = find(axrev.');
+if ~isempty(ii),
+ start( ii) = -start( ii);
+ stop( ii) = -stop( ii);
+ crossdir(ii) = -crossdir(ii);
+end;
+
+% transpose start/stop values
+start = start.';
+stop = stop.';
+
+% take care of defaults, page was done above
+ii=find(isnan(start(:) )); if ~isempty(ii), start(ii) = axm(ii)+axr(ii)/2; end;
+ii=find(isnan(stop(:) )); if ~isempty(ii), stop(ii) = axm(ii)+axr(ii)/2; end;
+ii=find(isnan(crossdir(:) )); if ~isempty(ii), crossdir(ii) = zeros(length(ii),1); end;
+ii=find(isnan(len )); if ~isempty(ii), len(ii) = ones(length(ii),1)*deflen; end;
+ii=find(isnan(baseangle )); if ~isempty(ii), baseangle(ii) = ones(length(ii),1)*defbaseangle; end;
+ii=find(isnan(tipangle )); if ~isempty(ii), tipangle(ii) = ones(length(ii),1)*deftipangle; end;
+ii=find(isnan(wid )); if ~isempty(ii), wid(ii) = ones(length(ii),1)*defwid; end;
+ii=find(isnan(ends )); if ~isempty(ii), ends(ii) = ones(length(ii),1)*defends; end;
+
+% transpose rest of values
+len = len.';
+baseangle = baseangle.';
+tipangle = tipangle.';
+wid = wid.';
+page = page.';
+crossdir = crossdir.';
+ends = ends.';
+ax = ax.';
+
+% given x, a 3xN matrix of points in 3-space;
+% want to convert to X, the corresponding 4xN 2-space matrix
+%
+% tmp1=[(x-axm)./axr; ones(1,size(x,1))];
+% if (oneax), X=T*tmp1;
+% else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1;
+% tmp2=zeros(4,4*N); tmp2(:)=tmp1(:);
+% X=zeros(4,N); X(:)=sum(tmp2)'; end;
+% X = X ./ (ones(4,1)*X(4,:));
+
+% for all points with start==stop, start=stop-(verysmallvalue)*(up-direction);
+ii = find(all(start==stop));
+if ~isempty(ii),
+ % find an arrowdir vertical on screen and perpendicular to viewer
+ % transform to 2-D
+ tmp1 = [(stop(:,ii)-axm(:,ii))./axr(:,ii);ones(1,length(ii))];
+ if (oneax), twoD=T*tmp1;
+ else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T(:,ii).*tmp1;
+ tmp2=zeros(4,4*length(ii)); tmp2(:)=tmp1(:);
+ twoD=zeros(4,length(ii)); twoD(:)=sum(tmp2)'; end;
+ twoD=twoD./(ones(4,1)*twoD(4,:));
+ % move the start point down just slightly
+ tmp1 = twoD + [0;-1/1000;0;0]*(limrange(2,ii)./ap(2,ii));
+ % transform back to 3-D
+ if (oneax), threeD=invT*tmp1;
+ else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT(:,ii).*tmp1;
+ tmp2=zeros(4,4*length(ii)); tmp2(:)=tmp1(:);
+ threeD=zeros(4,length(ii)); threeD(:)=sum(tmp2)'; end;
+ start(:,ii) = (threeD(1:3,:)./(ones(3,1)*threeD(4,:))).*axr(:,ii)+axm(:,ii);
+end;
+
+% compute along-arrow points
+% transform Start points
+ tmp1=[(start-axm)./axr;ones(1,narrows)];
+ if (oneax), X0=T*tmp1;
+ else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1;
+ tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
+ X0=zeros(4,narrows); X0(:)=sum(tmp2)'; end;
+ X0=X0./(ones(4,1)*X0(4,:));
+% transform Stop points
+ tmp1=[(stop-axm)./axr;ones(1,narrows)];
+ if (oneax), Xf=T*tmp1;
+ else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1;
+ tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:);
+ Xf=zeros(4,narrows); Xf(:)=sum(tmp2)'; end;
+ Xf=Xf./(ones(4,1)*Xf(4,:));
+% compute pixel distance between points
+ D = sqrt(sum(((Xf(1:2,:)-X0(1:2,:)).*(ap./limrange)).^2));
+% compute and modify along-arrow distances
+ len1 = len;
+ len2 = len - (len.*tan(tipangle/180*pi)-wid/2).*tan((90-baseangle)/180*pi);
+ slen0 = zeros(1,narrows);
+ slen1 = len1 .* ((ends==2)|(ends==3));
+ slen2 = len2 .* ((ends==2)|(ends==3));
+ len0 = zeros(1,narrows);
+ len1 = len1 .* ((ends==1)|(ends==3));
+ len2 = len2 .* ((ends==1)|(ends==3));
+ % for no start arrowhead
+ ii=find((ends==1)&(D0), set(H,extraprops{:}); end;
+ % handle choosing arrow Start and/or Stop locations if unspecified
+ [H,oldaxlims,errstr] = arrow_clicks(H,ud,x,y,z,ax,oldaxlims);
+ if ~isempty(errstr), error([upper(mfilename) ' got ' errstr]); end;
+ % set the output
+ if (nargout>0), h=H; end;
+ % make sure the axis limits did not change
+ if isempty(oldaxlims),
+ ARROW_AXLIMITS = [];
+ else,
+ lims = get(oldaxlims(:,1),{'XLim','YLim','ZLim'})';
+ lims = reshape(cat(2,lims{:}),6,size(lims,2));
+ %mask = arrow_is2DXY(oldaxlims(:,1));
+ %oldaxlims(mask,6:7) = lims(5:6,mask)';
+ ARROW_AXLIMITS = oldaxlims(find(any(oldaxlims(:,2:7)'~=lims)),:);
+ if ~isempty(ARROW_AXLIMITS),
+ warning(arrow_warnlimits(ARROW_AXLIMITS,narrows));
+ end;
+ end;
+else,
+ % don't create the patch, just return the data
+ h=x;
+ yy=y;
+ zz=z;
+end;
+
+
+
function out = arrow_defcheck(in,def,prop)
% ARROW_DEFCHECK  Substitute the default value when IN is a 'default' request.
%   OUT = ARROW_DEFCHECK(IN,DEF,PROP) returns DEF if IN is a single-row
%   string whose first three characters are (case-insensitively) 'def';
%   otherwise IN is returned unchanged.  Non-string inputs pass straight
%   through.  A string that is neither a 'default' request nor empty-PROP
%   tolerated raises an error naming the PROP property.
 if ~isstr(in),
   out = in;
   return;
 end;
 isdef = (size(in,1)==1) & strncmp(lower(in),'def',3);
 if isdef,
   out = def;
 else,
   out = in;
   if ~isempty(prop),
     error([upper(mfilename) ' does not recognize ''' in(:)' ''' as a valid ''' prop ''' string.']);
   end;
 end;
+
+
+
function [H,oldaxlims,errstr] = arrow_clicks(H,ud,x,y,z,ax,oldaxlims)
% handle choosing arrow Start and/or Stop locations if necessary
%
% Inputs:
%   H         - handles of the arrow objects just created
%   ud        - per-arrow matrix; columns 1:3 hold Start and columns 4:6
%               hold Stop coordinates, an all-NaN triple meaning "ask user"
%   x,y,z     - per-arrow coordinate columns, trimmed in step with ud
%   ax        - axes handle for each arrow
%   oldaxlims - saved axis-limit rows; rows for arrows deleted after an
%               interrupted click are removed before returning
% Outputs:
%   H, oldaxlims - possibly shortened copies of the inputs
%   errstr       - '' on success, else the error text of an interrupted click
 errstr = '';
 if isempty(H)|isempty(ud)|isempty(x), return; end;
 % determine which (if any) need Start and/or Stop
 needStart = all(isnan(ud(:,1:3)'))';
 needStop = all(isnan(ud(:,4:6)'))';
 % NOTE(review): 'any' collapses the per-arrow logical vector to a scalar,
 % yet ~mask is used below as a row/column selector -- verify against the
 % original arrow.m whether mask was meant to stay a vector.
 mask = any(needStart|needStop);
 if ~any(mask), return; end;
 ud(~mask,:)=[]; ax(:,~mask)=[];
 x(:,~mask)=[]; y(:,~mask)=[]; z(:,~mask)=[];
 % make them invisible for the time being
 set(H,'Visible','off');
 % save the current axes and limits modes; set to manual for the time being
 oldAx = gca;
 limModes=get(ax(:),{'XLimMode','YLimMode','ZLimMode'});
 set(ax(:),{'XLimMode','YLimMode','ZLimMode'},{'manual','manual','manual'});
 % loop over each arrow that requires attention
 jj = find(mask);
 for ii=1:length(jj),
 h = H(jj(ii));
 axes(ax(ii));
 % figure out correct call
 if needStart(ii), prop='Start'; else, prop='Stop'; end;
 [wasInterrupted,errstr] = arrow_click(needStart(ii)&needStop(ii),h,prop,ax(ii));
 % handle errors and control-C
 if wasInterrupted,
 % drop this arrow and all remaining ones, keeping H/oldaxlims in sync
 delete(H(jj(ii:end)));
 H(jj(ii:end))=[];
 oldaxlims(jj(ii:end),:)=[];
 break;
 end;
 end;
 % restore the axes and limit modes
 axes(oldAx);
 set(ax(:),{'XLimMode','YLimMode','ZLimMode'},limModes);
+
function [wasInterrupted,errstr] = arrow_click(lockStart,H,prop,ax)
% handle the clicks for one arrow
%
% Interactively places one endpoint of arrow H on axes AX.
%   lockStart - when true, a click-drag sets Start at button-down and then
%               tracks Stop until button-up; when false only PROP is set
%   prop      - 'Start' or 'Stop', the endpoint to place
% Communicates with arrow_callback through the ARROW_CLICK_* globals.
% Returns wasInterrupted=1 (with lasterr text in errstr) on Ctrl-C/error.
 fig = get(ax,'Parent');
 % save some things
 oldFigProps = {'Pointer','WindowButtonMotionFcn','WindowButtonUpFcn'};
 oldFigValue = get(fig,oldFigProps);
 oldArrowProps = {'EraseMode'};
 oldArrowValue = get(H,oldArrowProps);
 set(H,'EraseMode','background'); %because 'xor' makes shaft invisible unless Width>1
 global ARROW_CLICK_H ARROW_CLICK_PROP ARROW_CLICK_AX ARROW_CLICK_USE_Z
 ARROW_CLICK_H=H; ARROW_CLICK_PROP=prop; ARROW_CLICK_AX=ax;
 ARROW_CLICK_USE_Z=~arrow_is2DXY(ax)|~arrow_planarkids(ax);
 set(fig,'Pointer','crosshair');
 % set up the WindowButtonMotion so we can see the arrow while moving around
 set(fig,'WindowButtonUpFcn','set(gcf,''WindowButtonUpFcn'','''')', ...
 'WindowButtonMotionFcn','');
 if ~lockStart,
 set(H,'Visible','on');
 set(fig,'WindowButtonMotionFcn',[mfilename '(''callback'',''motion'');']);
 end;
 % wait for the button to be pressed
 [wasKeyPress,wasInterrupted,errstr] = arrow_wfbdown(fig);
 % if we wanted to click-drag, set the Start point
 if lockStart & ~wasInterrupted,
 pt = arrow_point(ARROW_CLICK_AX,ARROW_CLICK_USE_Z);
 feval(mfilename,H,'Start',pt,'Stop',pt);
 set(H,'Visible','on');
 ARROW_CLICK_PROP='Stop';
 set(fig,'WindowButtonMotionFcn',[mfilename '(''callback'',''motion'');']);
 % wait for the mouse button to be released
 eval('waitfor(fig,''WindowButtonUpFcn'','''');','wasInterrupted=1;');
 if wasInterrupted, errstr=lasterr; end;
 end;
 if ~wasInterrupted, feval(mfilename,'callback','motion'); end;
 % restore some things
 % NOTE(review): restores onto gcf rather than fig -- if figure focus
 % changed during the interaction this may target the wrong figure; verify.
 set(gcf,oldFigProps,oldFigValue);
 set(H,oldArrowProps,oldArrowValue);
+
function arrow_callback(varargin)
% handle redrawing callbacks
%
% Dispatches callback commands installed by arrow_click.  The only command
% recognized is 'motion': it re-invokes the main arrow function (via
% feval(mfilename,...)) to move the endpoint named in the ARROW_CLICK_*
% globals to the current mouse position, then forces a redraw.  Any other
% string raises an error.
 if nargin==0, return; end;
 str = varargin{1};
 if ~isstr(str), error([upper(mfilename) ' got an invalid Callback command.']); end;
 s = lower(str);
 if strcmp(s,'motion'),
 % motion callback
 global ARROW_CLICK_H ARROW_CLICK_PROP ARROW_CLICK_AX ARROW_CLICK_USE_Z
 feval(mfilename,ARROW_CLICK_H,ARROW_CLICK_PROP,arrow_point(ARROW_CLICK_AX,ARROW_CLICK_USE_Z));
 drawnow;
 else,
 error([upper(mfilename) ' does not recognize ''' str(:).' ''' as a valid Callback option.']);
 end;
+
function out = arrow_point(ax,use_z)
% ARROW_POINT  Current mouse point on an axes.
%   OUT = ARROW_POINT(AX,USE_Z) returns the front row of AX's
%   'CurrentPoint' as a 1x3 vector, or only its x/y pair when USE_Z is
%   false.  AX defaults to the current axes; USE_Z defaults to true unless
%   the axes is a flat 2-D X-Y view whose children are all planar.
 if nargin==0, ax = gca; end;
 if nargin<2,
   use_z = ~arrow_is2DXY(ax) | ~arrow_planarkids(ax);
 end;
 cp = get(ax,'CurrentPoint');
 pt = cp(1,:);
 if use_z,
   out = pt;
 else,
   out = pt(1:2);
 end;
+
function [wasKeyPress,wasInterrupted,errstr] = arrow_wfbdown(fig)
% wait for button down ignoring object ButtonDownFcn's
%
% Temporarily blanks every non-empty ButtonDownFcn in FIG (so object-level
% handlers cannot swallow the click), installs KeyPressFcn and
% WindowButtonDownFcn handlers that clear themselves when triggered, and
% blocks in waitfor until one of them fires.  Saved callbacks are restored
% before returning.
% Outputs:
%   wasKeyPress    - 1 if a key press (rather than a click) ended the wait
%   wasInterrupted - 1 if waitfor errored (e.g. Ctrl-C or figure deleted)
%   errstr         - lasterr text when interrupted, '' otherwise
 if nargin==0, fig=gcf; end;
 errstr = '';
 % save ButtonDownFcn values
 objs = findobj(fig);
 buttonDownFcns = get(objs,'ButtonDownFcn');
 mask=~strcmp(buttonDownFcns,''); objs=objs(mask); buttonDownFcns=buttonDownFcns(mask);
 set(objs,'ButtonDownFcn','');
 % save other figure values
 figProps = {'KeyPressFcn','WindowButtonDownFcn'};
 figValue = get(fig,figProps);
 % do the real work
 set(fig,'KeyPressFcn','set(gcf,''KeyPressFcn'','''',''WindowButtonDownFcn'','''');', ...
 'WindowButtonDownFcn','set(gcf,''WindowButtonDownFcn'','''')');
 lasterr('');
 % the eval traps the error waitfor raises on Ctrl-C / figure deletion
 wasInterrupted=0; eval('waitfor(fig,''WindowButtonDownFcn'','''');','wasInterrupted=1;');
 % a key press clears both callbacks, a click clears only WindowButtonDownFcn
 wasKeyPress = ~wasInterrupted & strcmp(get(fig,'KeyPressFcn'),'');
 if wasInterrupted, errstr=lasterr; end;
 % restore ButtonDownFcn and other figure values
 set(objs,'ButtonDownFcn',buttonDownFcns);
 set(fig,figProps,figValue);
+
+
+
function [out,is2D] = arrow_is2DXY(ax)
% check if axes are 2-D X-Y plots
%
% For each axes handle in AX:
%   out(k)  = 1 when the view elevation is +/-90 degrees (flat X-Y view)
%   is2D(k) = 1 when out(k) is set, or when both view angles are exact
%             multiples of 90 degrees (any axis-aligned 2-D projection)
 % may not work for modified camera angles, etc.
 out = zeros(size(ax)); % 2-D X-Y plots
 is2D = out; % any 2-D plots
 views = get(ax(:),{'View'});
 views = cat(1,views{:});
 out(:) = abs(views(:,2))==90;
 is2D(:) = out(:) | all(rem(views',90)==0)';
+
function out = arrow_planarkids(ax)
% check if axes descendents all have empty ZData (lines,patches,surfaces)
%
% Returns a logical array with one entry per axes handle in AX: out(k) is
% 1 if every line, patch, and surface child of ax(k) has empty ZData (the
% contents are purely 2-D), and flips to 0 as soon as one child has ZData.
 out = logical(ones(size(ax)));
 allkids = get(ax(:),{'Children'});
 for k=1:length(allkids),
 kids = get([findobj(allkids{k},'flat','Type','line')
 findobj(allkids{k},'flat','Type','patch')
 findobj(allkids{k},'flat','Type','surface')],{'ZData'});
 for j=1:length(kids),
 if ~isempty(kids{j}), out(k)=logical(0); break; end;
 end;
 end;
+
+
+
function arrow_fixlimits(axlimits)
% reset the axis limits as necessary
%
% AXLIMITS has one row per axes: [handle xlim(1:2) ylim(1:2) zlim(1:2)].
% Each X/Y/Z limit pair is written back only when it differs from the
% axes' current value.  An empty AXLIMITS just prints a notice; the loop
% below then runs zero times, so no early return is needed.
 if isempty(axlimits), disp([upper(mfilename) ' does not remember any axis limits to reset.']); end;
 for k=1:size(axlimits,1),
 if any(get(axlimits(k,1),'XLim')~=axlimits(k,2:3)), set(axlimits(k,1),'XLim',axlimits(k,2:3)); end;
 if any(get(axlimits(k,1),'YLim')~=axlimits(k,4:5)), set(axlimits(k,1),'YLim',axlimits(k,4:5)); end;
 if any(get(axlimits(k,1),'ZLim')~=axlimits(k,6:7)), set(axlimits(k,1),'ZLim',axlimits(k,6:7)); end;
 end;
+
+
+
function out = arrow_WarpToFill(notstretched,manualcamera,curax)
% check if we are in "WarpToFill" mode.
%
% Returns 1 when the axes' (undocumented) 'WarpToFill' property is 'on'.
% The NOTSTRETCHED/MANUALCAMERA arguments are accepted but unused here;
% they feed the documented fallback below should 'WarpToFill' ever be
% removed from MATLAB.
 out = strcmp(get(curax,'WarpToFill'),'on');
 % 'WarpToFill' is undocumented, so may need to replace this by
 % out = ~( any(notstretched) & any(manualcamera) );
+
+
+
function out = arrow_warnlimits(axlimits,narrows)
% create a warning message if we've changed the axis limits
%
% OUT = ARROW_WARNLIMITS(AXLIMITS,NARROWS) builds the warning text shown
% after drawing changed some axis limits.  AXLIMITS has one row per
% affected axes; NARROWS is the number of arrows just drawn (used only to
% pluralize "arrow").
 % Bug fix: this used to be "switch (size(axlimits,1)==1)", a logical, so
 % 'case 2' was unreachable and exactly two axes reported "several axes";
 % switch on the row count itself.
 switch (size(axlimits,1))
 case 1, msg='';
 case 2, msg='on two axes ';
 otherwise, msg='on several axes ';
 end;
 msg = [upper(mfilename) ' changed the axis limits ' msg ...
 'when adding the arrow'];
 if (narrows>1), msg=[msg 's']; end;
 out = [msg '.' sprintf('\n') ' Call ' upper(mfilename) ...
 ' FIXLIMITS to reset them now.'];
+
+
+
function arrow_copyprops(fm,to)
% copy line properties to patches
%
% Copies display-related properties from objects FM to objects TO,
% translating between line and patch color conventions:
%   patch->patch : FaceColor + EdgeColor + shared props
%   line ->line  : Color + shared props
%   patch->line  : patch EdgeColor becomes line Color
%   line ->patch : line Color becomes patch EdgeColor; FaceColor forced 'none'
 props = {'EraseMode','LineStyle','LineWidth','Marker','MarkerSize',...
 'MarkerEdgeColor','MarkerFaceColor','ButtonDownFcn', ...
 'Clipping','DeleteFcn','BusyAction','HandleVisibility', ...
 'Selected','SelectionHighlight','Visible'};
 lineprops = {'Color', props{:}};
 patchprops = {'EdgeColor',props{:}};
 patch2props = {'FaceColor',patchprops{:}};
 fmpatch = strcmp(get(fm,'Type'),'patch');
 topatch = strcmp(get(to,'Type'),'patch');
 set(to( fmpatch& topatch),patch2props,get(fm( fmpatch& topatch),patch2props)); %p->p
 set(to(~fmpatch&~topatch),lineprops, get(fm(~fmpatch&~topatch),lineprops )); %l->l
 set(to( fmpatch&~topatch),lineprops, get(fm( fmpatch&~topatch),patchprops )); %p->l
 set(to(~fmpatch& topatch),patchprops, get(fm(~fmpatch& topatch),lineprops) ,'FaceColor','none'); %l->p
+
+
+
function arrow_props
% display further help info about ARROW properties
%
% Prints a plain-text reference card (including an ASCII-art arrow
% diagram) for the properties accepted by the ARROW function.  Purely
% informational: no inputs, no outputs.  The string literals below are
% layout-sensitive and must not be re-wrapped.
 c = sprintf('\n');
 disp([c ...
 'ARROW Properties: Default values are given in [square brackets], and other' c ...
 ' acceptable equivalent property names are in (parenthesis).' c c ...
 ' Start The starting points. For N arrows, B' c ...
 ' this should be a Nx2 or Nx3 matrix. /|\ ^' c ...
 ' Stop The end points. For N arrows, this /|||\ |' c ...
 ' should be a Nx2 or Nx3 matrix. //|||\\ L|' c ...
 ' Length Length of the arrowhead (in pixels on ///|||\\\ e|' c ...
 ' screen, points on a page). [16] (Len) ////|||\\\\ n|' c ...
 ' BaseAngle Angle (degrees) of the base angle /////|D|\\\\\ g|' c ...
 ' ADE. For a simple stick arrow, use //// ||| \\\\ t|' c ...
 ' BaseAngle=TipAngle. [90] (Base) /// ||| \\\ h|' c ...
 ' TipAngle Angle (degrees) of tip angle ABC. //<----->|| \\ |' c ...
 ' [16] (Tip) / base ||| \ V' c ...
 ' Width Width of the base in pixels. Not E angle ||<-------->C' c ...
 ' the ''LineWidth'' prop. [0] (Wid) |||tipangle' c ...
 ' Page If provided, non-empty, and not NaN, |||' c ...
 ' this causes ARROW to use hardcopy |||' c ...
 ' rather than onscreen proportions. A' c ...
 ' This is important if screen aspect --> <-- width' c ...
 ' ratio and hardcopy aspect ratio are ----CrossDir---->' c ...
 ' vastly different. []' c...
 ' CrossDir A vector giving the direction towards which the fletches' c ...
 ' on the arrow should go. [computed such that it is perpen-' c ...
 ' dicular to both the arrow direction and the view direction' c ...
 ' (i.e., as if it was pasted on a normal 2-D graph)] (Note' c ...
 ' that CrossDir is a vector. Also note that if an axis is' c ...
 ' plotted on a log scale, then the corresponding component' c ...
 ' of CrossDir must also be set appropriately, i.e., to 1 for' c ...
 ' no change in that direction, >1 for a positive change, >0' c ...
 ' and <1 for negative change.)' c ...
 ' NormalDir A vector normal to the fletch direction (CrossDir is then' c ...
 ' computed by the vector cross product [Line]x[NormalDir]). []' c ...
 ' (Note that NormalDir is a vector. Unlike CrossDir,' c ...
 ' NormalDir is used as is regardless of log-scaled axes.)' c ...
 ' Ends Set which end has an arrowhead. Valid values are ''none'',' c ...
 ' ''stop'', ''start'', and ''both''. [''stop''] (End)' c...
 ' ObjectHandles Vector of handles to previously-created arrows to be' c ...
 ' updated or line objects to be converted to arrows.' c ...
 ' [] (Object,Handle)' c ]);
+
+
+
function out = arrow_demo
 % demo
 % Builds the data set shared by arrow_demo2/arrow_demo3: the PEAKS
 % surface with one quadrant NaN'd out, plotted with SURF.  Returns a
 % struct with fields x,y,z (the data), hs (surface handle), iii (linear
 % index of the maximum of z), and axlim (tight data limits
 % [xmin xmax ymin ymax zmin zmax]).
 % create the data
 [x,y,z] = peaks;
 [ddd,out.iii]=max(z(:));
 out.axlim = [min(x(:)) max(x(:)) min(y(:)) max(y(:)) min(z(:)) max(z(:))];

 % modify it by inserting some NaN's
 [m,n] = size(z);
 m = floor(m/2);
 n = floor(n/2);
 z(1:m,1:n) = NaN*ones(m,n);

 % graph it
 clf('reset');
 out.hs=surf(x,y,z);
 out.x=x; out.y=y; out.z=z;
 xlabel('x'); ylabel('y');
+
function h = arrow_demo3(in)
 % 3-D demo: draws a collection of arrows over the surface produced by
 % arrow_demo (IN is its output struct), showcasing colors, base/tip
 % angles, widths, Ends, CrossDir, and stick arrows.  Each arrow is drawn
 % by feval(mfilename,...), i.e. by the main ARROW function.  Returns the
 % column vector of all arrow handles.
 % set the view
 axlim = in.axlim;
 axis(axlim);
 zlabel('z');
 %set(in.hs,'FaceColor','interp');
 view(viewmtx(-37.5,30,20));
 title(['Demo of the capabilities of the ARROW function in 3-D']);

 % Normal blue arrow
 h1 = feval(mfilename,[axlim(1) axlim(4) 4],[-.8 1.2 4], ...
 'EdgeColor','b','FaceColor','b');

 % Normal white arrow, clipped by the surface
 h2 = feval(mfilename,axlim([1 4 6]),[0 2 4]);
 t=text(-2.4,2.7,7.7,'arrow clipped by surf');

 % Baseangle<90
 h3 = feval(mfilename,[3 .125 3.5],[1.375 0.125 3.5],30,50);
 t2=text(3.1,.125,3.5,'local maximum');

 % Baseangle<90, fill and edge colors different
 h4 = feval(mfilename,axlim(1:2:5)*.5,[0 0 0],36,60,25, ...
 'EdgeColor','b','FaceColor','c');
 t3=text(axlim(1)*.5,axlim(3)*.5,axlim(5)*.5-.75,'origin');
 set(t3,'HorizontalAlignment','center');

 % Baseangle>90, black fill
 h5 = feval(mfilename,[-2.9 2.9 3],[-1.3 .4 3.2],30,120,[],6, ...
 'EdgeColor','r','FaceColor','k','LineWidth',2);

 % Baseangle>90, no fill
 h6 = feval(mfilename,[-2.9 2.9 1.3],[-1.3 .4 1.5],30,120,[],6, ...
 'EdgeColor','r','FaceColor','none','LineWidth',2);

 % Stick arrow
 h7 = feval(mfilename,[-1.6 -1.65 -6.5],[0 -1.65 -6.5],[],16,16);
 t4=text(-1.5,-1.65,-7.25,'global mininum');
 set(t4,'HorizontalAlignment','center');

 % Normal, black fill
 h8 = feval(mfilename,[-1.4 0 -7.2],[-1.4 0 -3],'FaceColor','k');
 t5=text(-1.5,0,-7.75,'local minimum');
 set(t5,'HorizontalAlignment','center');

 % Gray fill, crossdir specified, 'LineStyle' --
 h9 = feval(mfilename,[-3 2.2 -6],[-3 2.2 -.05],36,[],27,6,[],[0 -1 0], ...
 'EdgeColor','k','FaceColor',.75*[1 1 1],'LineStyle','--');

 % a series of normal arrows, linearly spaced, crossdir specified
 h10y=(0:4)'/3;
 h10 = feval(mfilename,[-3*ones(size(h10y)) h10y -6.5*ones(size(h10y))], ...
 [-3*ones(size(h10y)) h10y -.05*ones(size(h10y))], ...
 12,[],[],[],[],[0 -1 0]);

 % a series of normal arrows, linearly spaced
 h11x=(1:.33:2.8)';
 h11 = feval(mfilename,[h11x -3*ones(size(h11x)) 6.5*ones(size(h11x))], ...
 [h11x -3*ones(size(h11x)) -.05*ones(size(h11x))]);

 % series of magenta arrows, radially oriented, crossdir specified
 h12x=2; h12y=-3; h12z=axlim(5)/2; h12xr=1; h12zr=h12z; ir=.15;or=.81;
 h12t=(0:11)'/6*pi;
 h12 = feval(mfilename, ...
 [h12x+h12xr*cos(h12t)*ir h12y*ones(size(h12t)) ...
 h12z+h12zr*sin(h12t)*ir],[h12x+h12xr*cos(h12t)*or ...
 h12y*ones(size(h12t)) h12z+h12zr*sin(h12t)*or], ...
 10,[],[],[],[], ...
 [-h12xr*sin(h12t) zeros(size(h12t)) h12zr*cos(h12t)],...
 'FaceColor','none','EdgeColor','m');

 % series of normal arrows, tangentially oriented, crossdir specified
 or13=.91; h13t=(0:.5:12)'/6*pi;
 locs = [h12x+h12xr*cos(h13t)*or13 h12y*ones(size(h13t)) h12z+h12zr*sin(h13t)*or13];
 h13 = feval(mfilename,locs(1:end-1,:),locs(2:end,:),6);

 % arrow with no line ==> oriented downwards
 h14 = feval(mfilename,[3 3 .100001],[3 3 .1],30);
 t6=text(3,3,3.6,'no line'); set(t6,'HorizontalAlignment','center');

 % arrow with arrowheads at both ends
 h15 = feval(mfilename,[-.5 -3 -3],[1 -3 -3],'Ends','both','FaceColor','g', ...
 'Length',20,'Width',3,'CrossDir',[0 0 1],'TipAngle',25);

 h=[h1;h2;h3;h4;h5;h6;h7;h8;h9;h10;h11;h12;h13;h14;h15];
+
function h = arrow_demo2(in)
 % 2-D demo: re-plots the arrow_demo surface (IN is its output struct) as
 % a shaded top-down view with contours, optionally log-scaling the y
 % axis, and draws several arrows over it.  Returns the arrow handles.
 axlim = in.axlim;
 dolog = 1; % when set, y data and limits are mapped through 10.^
 if (dolog), set(in.hs,'YData',10.^get(in.hs,'YData')); end;
 shading('interp');
 view(2);
 title(['Demo of the capabilities of the ARROW function in 2-D']);
 hold on; [C,H]=contour(in.x,in.y,in.z,20,'-'); hold off;
 for k=H',
 set(k,'ZData',(axlim(6)+1)*ones(size(get(k,'XData'))),'Color','k');
 if (dolog), set(k,'YData',10.^get(k,'YData')); end;
 end;
 if (dolog), axis([axlim(1:2) 10.^axlim(3:4)]); set(gca,'YScale','log');
 else, axis(axlim(1:4)); end;

 % Normal blue arrow
 start = [axlim(1) axlim(4) axlim(6)+2];
 stop = [in.x(in.iii) in.y(in.iii) axlim(6)+2];
 if (dolog), start(:,2)=10.^start(:,2); stop(:,2)=10.^stop(:,2); end;
 h1 = feval(mfilename,start,stop,'EdgeColor','b','FaceColor','b');

 % three arrows with varying fill, width, and baseangle
 start = [-3 -3 10; -3 -1.5 10; -1.5 -3 10];
 stop = [-.03 -.03 10; -.03 -1.5 10; -1.5 -.03 10];
 if (dolog), start(:,2)=10.^start(:,2); stop(:,2)=10.^stop(:,2); end;
 h2 = feval(mfilename,start,stop,24,[90;60;120],[],[0;0;4],'Ends',str2mat('both','stop','stop'));
 set(h2(2),'EdgeColor',[0 .35 0],'FaceColor',[0 .85 .85]);
 set(h2(3),'EdgeColor','r','FaceColor',[1 .5 1]);
 h=[h1;h2];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/dot_to_graph.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/dot_to_graph.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,121 @@
function [Adj, labels, x, y] = dot_to_graph(filename)
% [Adj, labels, x, y] = dot_to_graph(filename)
% Extract a matrix representation, node labels, and node position coordinates
% from a file in GraphViz format http://www.research.att.com/sw/tools/graphviz
%
% INPUTS:
%  'filename' - the file in DOT format containing the graph layout.
% OUTPUT:
%  'Adj'    - an adjacency matrix representation of the graph in 'filename';
%  'labels' - a cell array with the names of the nodes of the graph;
%  'x'      - a row vector with the x-coordinates of the nodes in 'filename';
%  'y'      - a row vector with the y-coordinates of the nodes in 'filename'.
%
% WARNINGS: not guaranteed to parse ANY GraphViz file. Debugged on undirected
% sample graphs from GraphViz (Heawood, Petersen, ER, ngk10_4, process).
% Complains about RecursionLimit set only to 500 on huge graphs.
% Ignores singletons (disjoint nodes).
% Sample DOT code "ABC.dot", read by [Adj, labels, x, y] = dot_to_graph('ABC.dot')
% digraph G {
%   A [pos="28,31"];
%   B [pos="74,87"];
%   A -- B [pos="e,61,71 41,47 46,53 50,58 55,64"];
% }
% last modified: Jan 2004
% by Alexi Savov:  asavov @wustl.edu |  http://artsci.wustl.edu/~azsavov
%    Leon Peshkin: pesha @ai.mit.edu |  http://www.ai.mit.edu/~pesha
%    Tom Minka

if ~exist(filename)             % Checks whether the specified file exists.
    error('* * * File does not exist or could not be found. * * *');
end;

lines = textread(filename,'%s','delimiter','\n','commentstyle','c');  % Read file into cell array
dot_lines = strvcat(lines);                                           % of lines, ignoring C-style comments

% Bug fix: the original test was "findstr(...) == []", which always yields
% an empty matrix, so the format error below could never be raised; test
% for emptiness explicitly instead.
if isempty(findstr(dot_lines(1,:), 'graph '))          % Is this a DOT file ?
    error('* * * File does not appear to be in valid DOT format. * * *');
end;

Nlns = size(dot_lines,1);       % The number of lines;
nodes = {};
unread = 1:Nlns;                % 'unread' list of lines which has not been examined yet
edge_id = 1;
Adj = [];
for line_ndx = 1:Nlns           % This section sets the adjacency matrix A(Lnode,Rnode) = edge_id.
    line = dot_lines(line_ndx,:);
    Ddash_pos = strfind(line, ' -- ') + 1;  % double dash positions
    arrow_pos = strfind(line, ' -> ') + 1;  % arrow dash positions
    left_bound = 1;
    for dash_pos = [Ddash_pos arrow_pos];   % if empty - not a POS line
        Lnode = sscanf(line(left_bound:dash_pos -2), '%s');
        Rnode = sscanf(line(dash_pos +3 : length(line)-1),'%s',1);
        Lndx = strmatch(Lnode, nodes, 'exact');
        Rndx = strmatch(Rnode, nodes, 'exact');
        if isempty(Lndx)        % extend our list of nodes
            nodes{end+1} = Lnode;
            Lndx = length(nodes);
        end
        if isempty(Rndx)
            nodes{end+1} = Rnode;
            Rndx = length(nodes);
        end
        Adj(Lndx, Rndx) = edge_id;
        if ismember(dash_pos, Ddash_pos)  % The edge is undirected, A(Rndx,Lndx) is also set;
            Adj(Rndx, Lndx) = edge_id;
        end
        edge_id = edge_id + 1;
        left_bound = dash_pos + 3;
        unread = setdiff(unread, line_ndx);
    end
end
Nvrt = length(nodes);   % number of vertices we found  [Do we ever have singleton vertices ???]
% nodes = strvcat(nodes);  % convert to the searchable array
x = zeros(1, Nvrt);
y = zeros(1, Nvrt);
labels = nodes;
% Find node's position coordinates if they are contained in 'filename'.
for line_ndx = unread           % Look for node's coordinates among the 'unread' lines.
    line = dot_lines(line_ndx,:);
    bra_pos = strfind(line, '[');        % has to have "[" if it has the label
    lst_node = 0;
    for node = 1:Nvrt      % look through the list of nodes
        % THE NEXT STATEMENT we assume no node is substring of any other node
        lbl_pos = strfind(line, nodes{node});
        if (~isempty(lbl_pos) & ~isempty(bra_pos) & (x(node) == 0))  % make sure we have not seen it
            if (lbl_pos(1) < bra_pos(1))  % label has to be to the left of bracket
                lst_node = node;
            end
        end
    end
    if lst_node
        pos_pos = strfind(line, 'pos');  % position of the "pos"
        if ~isempty(pos_pos)             % this line contains SOME position
            [node_pos] = sscanf(line(pos_pos:end), ' pos = "%d,%d"')';
            x(lst_node) = node_pos(1);
            y(lst_node) = node_pos(2);
        end
        % minka
        label_pos = strfind(line, 'label');  % position of the "label"
        if ~isempty(label_pos)
            label_end = strfind(line(label_pos:end),',');
            labels{lst_node} = unquote(line(label_pos+(6:label_end(1)-2)));
        end
    end
end

if (isempty(find(x)) & (nargout > 2))  % If coordinates were requested, but not found in 'filename'.
    warning('File does not contain node coordinates.');
end;
if ~(size(Adj,1)==size(Adj,2))         % Make sure Adj is a square matrix. ?
    Adj = eye(max(size(Adj)),size(Adj,1))*Adj*eye(size(Adj,2),max(size(Adj)));
end;
% Normalise the coordinates onto (0.05, 0.95).  Guard the span against zero
% (all nodes on one line), which previously divided by zero; computing the
% span inline also avoids the Statistics-Toolbox-only range() function.
xspan = max(x) - min(x); if (xspan == 0), xspan = 1; end;
yspan = max(y) - min(y); if (yspan == 0), yspan = 1; end;
x = .9*(x-min(x))/xspan+.05;    % normalise and push off margins
y = .9*(y-min(y))/yspan+.05;
+
+
+
function s = unquote(s)
% UNQUOTE  Remove every double-quote character from the string S.
s(s == '"') = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/draw_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,99 @@
+function [x, y, h] = draw_dbn(adj, inter, flip_intra, K, labels, node_t, x, y)
+% DRAW_LAYOUT_DBN Draws a layout for a Dynamical Belief Network
+%
+% [] = DRAW_LAYOUT_DBN(INTRA, INTER, )
+%
+% Inputs :
+% INTRA, INTER : Adjacency matrices
+% FLIP_FLAG : Transposes the DAG layout obtained from INTRA connections
+% If X1, Y1 are specified, FLIP_FLAG has no effect.
+% K : Unfold K times
+% LABELS - if -1, we use 1:N*K
+% Rest : See DRAW_LAYOUT
+%
+% Outputs :
+% Xi, Yi : Coordinates of nodes (for i'th timeslice) on the unit square
+% H : Object Handles
+%
+% Usage Example : draw_layout_dbn(intra, inter, 1);
+% draw_layout_dbn(intra, inter);
+%
+% Note :
+% See also DRAW_GRAPH
+
+% Uses : DRAW_GRAPH
+
+% Change History :
+% Date Time Prog Note
+% 17-Apr-2000 1:02 PM ATC Created under MATLAB 5.3.1.29215a (R11.1)
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+N = size(adj,1);
+if nargin<3,
+ flip_intra = 0;
+end;
+
+if nargin<4,
+ K = 2;
+end;
+
+if K<2 | K>7, error('2<=K<=7 must hold..'); end;
+
+
+if nargin<5
+% labels = cellstr(char(zeros(N,1)+double('+')));
+% labels = cellstr(int2str((1:N)'));
+ labels = cellstr(char((0:N-1)'+double('a')));
+end;
+
+if nargin<6,
+ node_t = zeros(N,1);
+% node_t = rand(N,1) > 0.5;
+end;
+
+if nargin<7,
+ [x1 y1] = make_layout(adj);
+ if flip_intra, tmp = x1; x1 = y1; y1 = tmp; end;
+end;
+
+mid = round(K/2);
+
+
+xi = x1(:)-1;
+x = [];
+y = repmat(y1(:), [K 1]);
+node_t2 = repmat(node_t(:), [K 1]);
+
+if isa(labels,'double') & labels==-1 % KPM
+ lb = num2strcell(1:N*K);
+else
+ lb = {};
+ for i=1:K,
+ labels1 = labels(:);
+ if i==mid, str = ''; else str = sprintf('%+d',i-mid); end;
+ for i=1:N,
+ labels1{i} = [labels1{i} '(t' str ')'];
+ end;
+ lb = [lb; labels1(:)];
+ end;
+end
+
+dag = zeros(N*K);
+
+for i=1:K,
+ xi = xi+1;
+ x = [x; xi];
+
+ idx = ((i-1)*N+1):i*N;
+ dag(idx,idx) = adj;
+ if i .8;
+% Adj2 = triu(Adj,1)+ triu(Adj,1)' + diag(zeros(size,1));
+% draw_dot(Adj2)
+
+% Original: Leon Peshkin
+% Modified by Tom Minka
+
+% minka
+N = size(adj,1);
+unique_labels = cellstr(num2str((1:N)','%-1d'));
+labels = unique_labels;
+isbox = zeros(N,1);
+rotate_flag = 1;
+tolerance = 0.001;
+options = '';
+for i = 1:2:length(varargin)
+ switch varargin{i}
+ case 'node_label', labels = varargin{i+1};
+ % replace with unique labels
+ varargin{i+1} = unique_labels;
+ case 'isbox', isbox = varargin{i+1};
+ case 'rotate', rotate_flag = varargin{i+1};
+ case 'tolerance', tolerance = varargin{i+1};
+ case 'start', start = varargin{i+1};
+ options = [options ' -Gstart=' num2str(start)];
+ case 'options', options = [options ' ' varargin{i+1}];
+ end
+end
+
+if ispc, shell = 'dos'; else, shell = 'unix'; end % Which OS ?
+
+cmdline = strcat(shell,'(''neato -V'')');
+status = eval(cmdline);
+%[status, result] = dos('neato -V'); % request version to check NEATO
+if status == 1, fprintf('Complaining \n'); exit, end
+
+tmpDOTfile = '_GtDout.dot'; % to be platform independant no use of directories
+tmpLAYOUT = '_LAYout.dot';
+graph_to_dot(adj > 0, 'filename', tmpDOTfile, 'node_label', unique_labels, varargin{:}); % save in file
+
+cmdline = strcat([shell '(''neato -Tdot ' tmpDOTfile options ' -o ' tmpLAYOUT ''')']); % preserve trailing spaces
+status = eval(cmdline); % get NEATO todo layout
+
+[adj, permuted_labels, x, y] = dot_to_graph(tmpLAYOUT); % load layout
+delete(tmpLAYOUT); delete(tmpDOTfile); % clean up temporary files
+
+% permute the original arguments to match permuted_labels.
+order = [];
+for i = 1:length(permuted_labels)
+ j = strmatch(permuted_labels{i},unique_labels,'exact');
+ order(i) = j(1);
+end
+labels = labels(order);
+isbox = isbox(order);
+if rotate_flag
+ [x,y] = best_rotation(x,y,tolerance);
+end
+
+figure(1); clf; axis square % now plot
+[x, y, h] = draw_graph(adj>0, labels, isbox, x, y, varargin{:});
+
+
function [x,y] = best_rotation(x,y,h)
% Rotate the points to maximize the horizontal and vertical alignment.
% Written by Tom Minka.
%
% X,Y are coordinate vectors; H is the alignment bandwidth passed through
% to rotation_cost.  The points are first normalised to zero mean and unit
% span, the rotation angle in [-pi/4, pi/4] minimising rotation_cost is
% found with fminbnd, and the rotated points are mapped back to the
% original scale.  The 'else' branch is a disabled brute-force grid search
% kept for debugging (it needs the project helper argmin).
% NOTE(review): xr/yr are zero when all x (or all y) coincide, making the
% normalisation divide by zero -- confirm callers never pass such input.

xm = mean(x);
ym = mean(y);
xr = max(x)-min(x);
yr = max(y)-min(y);
x = (x-xm)/xr;
y = (y-ym)/yr;

xy = [x(:) y(:)];
if 1
 angle = fminbnd(@rotation_cost,-pi/4,pi/4,[],xy,h);
else
 angles = linspace(-pi/4,pi/4,40);
 e = [];
 for i = 1:length(angles)
 e(i) = rotation_cost(angles(i),xy,h);
 end
 %figure(2)
 %plot(angles*180/pi,e)
 angle = angles(argmin(e));
end
%angle*180/pi
c = cos(angle); s = sin(angle);
xy = xy*[c s; -s c];

x = xy(:,1)*xr+xm;
y = xy(:,2)*yr+ym;
+
+
function e = rotation_cost(angle,xy,h)
% ROTATION_COST  Negative alignment score of points after rotation.
%   E = ROTATION_COST(ANGLE,XY,H) rotates the 2-column point matrix XY by
%   ANGLE radians and returns minus a kernel measure of how close the
%   pairwise x-coordinates and y-coordinates are (bandwidth H); a lower E
%   means more points share a horizontal or vertical line.

ca = cos(angle);
sa = sin(angle);
rotated = xy * [ca sa; -sa ca];
% Pairwise squared coordinate distances; self-distances are pushed to Inf
% so a point never counts as aligned with itself.
dxx = setdiag(sqdist(rotated(:,1)', rotated(:,1)'), Inf);
dyy = setdiag(sqdist(rotated(:,2)', rotated(:,2)'), Inf);
e = -(sum(exp(-dxx(:)/h)) + sum(exp(-dyy(:)/h)));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/draw_graph.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_graph.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,332 @@
+function [x, y, h] = draw_graph(adj, labels, node_t, x, y, varargin)
+% DRAW_GRAPH Draws a layout for a graph
+%
+% [X, Y, H] = DRAW_GRAPH(ADJ, LABELS, ISBOX, X, Y)
+%
+% Inputs :
+% ADJ : Adjacency matrix (source, sink)
+% LABELS : Cell array containing labels
+% ISBOX : 1 if node is a box, 0 if oval
+% X, Y, : Coordinates of nodes on the unit square
+%
+% Outputs :
+% X, Y : Coordinates of nodes on the unit square
+% H : Object handles
+%
+% Usage Example : [x, y] = draw_layout([0 1;0 0], {'Hidden','Visible'}, [1 0]');
+%
+% h(i,1) is the text handle - color
+% h(i,2) is the circle handle - facecolor
+%
+% See also MAKE_LAYOUT
+
+% Change History :
+% Date Time Prog Note
+% 13-Apr-2000 9:06 PM ATC Created under MATLAB 5.3.1.29215a (R11.1)
+%
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+adj = double(adj);
+N = size(adj,1);
+if nargin<2,
+ labels = cellstr(int2str((1:N)'));
+end
+
+if nargin<3,
+ node_t = zeros(N,1);
+else
+ node_t = node_t(:);
+end;
+
+axis([0 1 0 1]);
+set(gca,'XTick',[],'YTick',[],'box','on');
+% axis('square');
+%colormap(flipud(gray));
+
+if nargin<4,
+ [x y] = make_layout(adj);
+end;
+
+idx1 = find(node_t==0); h1 = []; wd1=[];
+if ~isempty(idx1)
+ [h1 wd1] = textoval(x(idx1), y(idx1), labels(idx1), varargin{:});
+end;
+
+idx2 = find(node_t~=0); h2 = []; wd2 = [];
+if ~isempty(idx2)
+ [h2 wd2] = textbox(x(idx2), y(idx2), labels(idx2), varargin{:});
+end;
+
+wd = zeros(size(wd1,1)+size(wd2,1),2);
+if ~isempty(idx1), wd(idx1, :) = wd1; end;
+if ~isempty(idx2), wd(idx2, :) = wd2; end;
+
+% bug: this code assumes [x y] is the center of each box and oval, which
+% isn't exactly true.
+h_edge = [];
+for i=1:N,
+ j = find(adj(i,:)==1);
+ for k=j,
+ if x(k)-x(i)==0,
+ sign = 1;
+ if y(i)>y(k), alpha = -pi/2; else alpha = pi/2; end;
+ else
+ alpha = atan((y(k)-y(i))/(x(k)-x(i)));
+ if x(i)2,
+ h = zeros(length(wd),2);
+ if ~isempty(idx1),
+ h(idx1,:) = h1;
+ end;
+ if ~isempty(idx2),
+ h(idx2,:) = h2;
+ end;
+end;
+
+%%%%%
+
+function [t, wd] = textoval(x, y, str, varargin)
+% TEXTOVAL Draws an oval around text objects
+%
+% [T, WIDTH] = TEXTOVAL(X, Y, STR)
+% [..] = TEXTOVAL(STR) % Interactive
+%
+% Inputs :
+% X, Y : Coordinates
+% TXT : Strings
+%
+% Outputs :
+% T : Object Handles
+% WIDTH : x and y Width of ovals
+%
+% Usage Example : [t] = textoval('Visit to Asia?');
+%
+%
+% Note :
+% See also TEXTBOX
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 15-Jun-1998 10:36 AM ATC Created under MATLAB 5.1.0.421
+% 12-Mar-2004 10:00 AM minka Changed placement/sizing.
+%
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+temp = [];
+textProperties = {'BackgroundColor','Color','FontAngle','FontName','FontSize','FontUnits','FontWeight','Rotation'};
+varargin = argfilter(varargin,textProperties);
+
+if nargin == 1
+ str = x;
+end
+if ~isa(str,'cell') str=cellstr(str); end;
+N = length(str);
+wd = zeros(N,2);
+for i=1:N,
+ if nargin == 1
+ [x, y] = ginput(1);
+ end
+ tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:});
+ % minka
+ [ptc wx wy] = draw_oval(tx);
+ wd(i,:) = [wx wy];
+ % draw_oval will paint over the text, so need to redraw it
+ delete(tx);
+ tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:});
+ temp = [temp; tx ptc];
+end
+if nargout>0, t = temp; end;
+
+%%%%%%%%%
+
+
+function [ptc, wx, wy] = draw_oval(tx, x, y)
+% Draws an oval box around a tex object
+sz = get(tx,'Extent');
+% minka
+wy = 2/3*sz(4);
+wx = 2/3*sz(3);
+x = sz(1)+sz(3)/2;
+y = sz(2)+sz(4)/2;
+ptc = ellipse(x, y, wx, wy);
+set(ptc, 'FaceColor','w');
+
+
+%%%%%%%%%%%%%
+
+function [p] = ellipse(x, y, rx, ry, c)
+% ELLIPSE Draws Ellipse shaped patch objects
+%
+% [P] = ELLIPSE(X, Y, Rx, Ry, C)
+%
+%
+% Inputs :
+% X : N x 1 vector of x coordinates
+% Y : N x 1 vector of y coordinates
+% Rx, Ry : Radii
+% C : Color index
+%
+%
+% Outputs :
+% P = Handles of Ellipse shaped path objects
+%
+% Usage Example : [] = ellipse();
+%
+%
+% Note :
+% See also
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 27-May-1998 9:55 AM ATC Created under MATLAB 5.1.0.421
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+if (nargin < 2) error('Usage Example : e = ellipse([0 1],[0 -1],[1 0.5],[2 0.5]); '); end;
+if (nargin < 3) rx = 0.1; end;
+if (nargin < 4) ry = rx; end;
+if (nargin < 5) c = 1; end;
+
+if length(c)==1, c = ones(size(x)).*c; end;
+if length(rx)==1, rx = ones(size(x)).*rx; end;
+if length(ry)==1, ry = ones(size(x)).*ry; end;
+
+n = length(x);
+p = zeros(size(x));
+t = 0:pi/30:2*pi;
+for i=1:n,
+ px = rx(i)*cos(t)+x(i);
+ py = ry(i)*sin(t)+y(i);
+ p(i) = patch(px,py,c(i));
+end;
+
+if nargout>0, pp = p; end;
+
+%%%%%
+
+function [t, wd] = textbox(x,y,str,varargin)
+% TEXTBOX Draws A Box around the text
+%
+% [T, WIDTH] = TEXTBOX(X, Y, STR)
+% [..] = TEXTBOX(STR)
+%
+% Inputs :
+% X, Y : Coordinates
+% TXT : Strings
+%
+% Outputs :
+% T : Object Handles
+% WIDTH : x and y Width of boxes
+%%
+% Usage Example : t = textbox({'Ali','Veli','49','50'});
+%
+%
+% Note :
+% See also TEXTOVAL
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 09-Jun-1998 11:43 AM ATC Created under MATLAB 5.1.0.421
+% 12-Mar-2004 10:00 AM minka Changed placement/sizing.
+%
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+temp = [];
+textProperties = {'BackgroundColor','Color','FontAngle','FontName','FontSize','FontUnits','FontWeight','Rotation'};
+varargin = argfilter(varargin,textProperties);
+
+if nargin == 1
+ str = x;
+end
+if ~isa(str,'cell') str=cellstr(str); end;
+N = length(str);
+wd = zeros(N,2);
+for i=1:N,
+ if nargin == 1
+ [x, y] = ginput(1);
+ end
+ tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:});
+ % minka
+ [ptc wx wy] = draw_box(tx);
+ wd(i,:) = [wx wy];
+ % draw_box will paint over the text, so need to redraw it
+ delete(tx);
+ tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:});
+ temp = [temp; tx ptc];
+end;
+
+if nargout>0, t = temp; end;
+
+
+function [ptc, wx, wy] = draw_box(tx)
+% Draws a box around a text object
+sz = get(tx,'Extent');
+% minka
+wy = 1/2*sz(4);
+wx = 1/2*sz(3);
+x = sz(1)+sz(3)/2;
+y = sz(2)+sz(4)/2;
+ptc = patch([x-wx x+wx x+wx x-wx], [y+wy y+wy y-wy y-wy],'w');
+set(ptc, 'FaceColor','w');
+
+
+
+function args = argfilter(args,keep)
+%ARGFILTER Remove unwanted arguments.
+% ARGFILTER(ARGS,KEEP), where ARGS = {'arg1',value1,'arg2',value2,...},
+% returns a new argument list where only the arguments named in KEEP are
+% retained. KEEP is a character array or cell array of strings.
+
+% Written by Tom Minka
+
+if ischar(keep)
+ keep = cellstr(keep);
+end
+i = 1;
+while i < length(args)
+ if ~ismember(args{i},keep)
+ args = args(setdiff(1:length(args),[i i+1]));
+ else
+ i = i + 2;
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/draw_graph_test.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_graph_test.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+% TEST_LAYOUT Script to test some bayesian net layouts
+%
+
+% Change History :
+% Date Time Prog Note
+% 13-Apr-2000 10:40 PM ATC Created under MATLAB 5.3.1.29215a (R11.1)
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+%bnet = mk_asia_bnet;
+%draw_graph(bnet.dag);
+
+% Make the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+disp('plot directed')
+clf;
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+draw_graph(dag);
+
+pause
+clf
+disp('plot undirected')
+udag = [dag+dag'];
+draw_graph(udag);
+
+pause
+clf
+disp('plot mixed')
+mg = [dag];
+mg(2,1) = 1; mg(8,5) = 1;
+draw_graph(mg);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/draw_hmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_hmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,117 @@
+function draw_hmm(A, varargin)
+% DRAW_HMM Make a picture of the HMM using dotty
+% function draw_hmm(A, ...)
+%
+% For details on dotty, see http://www.research.att.com/sw/tools/graphviz
+%
+% If A(i,j) > thresh, we draw an arc from state i to state j.
+%
+% Optional arguments (name/value pairs) [default]
+%
+% thresh - [1e-1]
+% obsprob - If B(i,o) > 0, we include "o" in the name of state i.
+% e.g., if state 5 emits 1,3,7, its label becomes "5: 1 3 7".
+% startprob - if startprob(i) > 0, the state name will be prefixed with "+".
+% endprob - if endprob(i) > 0, the state name will be appended with "-".
+% filename - if [], we write to 'tmp.dot', convert this to 'tmp.ps'
+% using 'dot -Tps tmp.dot -o tmp.ps', and then call ghostview to display the result.
+% dot and gv must be on your system path.
+% If filename ~= [], we just generate the dot file, and do not
+% convert it to postscript or call ghostview.
+
+[thresh, B, startprob, endprob, filename] = ...
+ process_options(varargin, 'thresh', 1e-1, 'obsprob', [], 'startprob', [], 'endprob', [], ...
+ 'filename', []);
+
+Q = length(A);
+
+arclabel = cell(Q,Q);
+G = zeros(Q,Q);
+for i=1:Q
+ for j=1:Q
+ if A(i,j) < thresh
+ arclabel{i,j} = '';
+ else
+ G(i,j) = 1;
+ arclabel{i,j} = sprintf('%5.3f', A(i,j));
+ end
+ end
+end
+
+
+nodelabel = cell(1,Q);
+for i=1:Q
+ % annotate start/stop states
+ if ~isempty(startprob) & ~approxeq(startprob(i), 0)
+ start = '+';
+ else
+ start = '';
+ end
+ if ~isempty(endprob) & ~approxeq(endprob(i), 0)
+ stop = '-';
+ else
+ stop = '';
+ end
+ label = sprintf('%s%d%s :', start, i, stop);
+
+ if ~isempty(B)
+ output_label = mk_output_label(B, i); % pass state index i (was undefined inside the subfunction)
+ label = strcat(label, output_label);
+ end
+
+ nodelabel{i} = label;
+end
+
+
+if isempty(filename)
+ filename = 'tmp.dot';
+ %mkdot(G, filename, arclabel, nodelabel)
+ graph_to_dot(G, 'filename', filename, 'arc_label', arclabel, 'node_label', nodelabel);
+ fprintf('converting from .dot to .ps\n')
+ !dot -Tps tmp.dot -o tmp.ps
+ !gv tmp.ps &
+else
+ graph_to_dot(G, 'filename', filename, 'arc_label', arclabel, 'node_label', nodelabel);
+ %mkdot(G, filename, arclabel, nodelabel)
+end
+
+
+%%%%%%%%%
+
+function label = mk_output_label(B, i)
+
+[Q O] = size(B);
+label = '';
+
+if 0
+ % print most probable symbols
+ for i=1:Q
+ m = max(B(i,:));
+ ndx = find(abs(B(i,:) - repmat(m,1,O)) < 1e-2);
+ %ndx = find(B(i,:)==m);
+ %label = sprintf('%d,', ndx);
+ end
+end
+
+if 0
+ % print prob distrib over all symbols
+ for o=1:O
+ if approxeq(B(i,o), 0)
+ %
+ else
+ label = strcat(label, sprintf('%d(%3.2f),', o, B(i,o)));
+ end
+ end
+end
+
+if 1
+ % print all non-zero symbols
+ chars = ['a' 'b' 'c'];
+ for o=1:O
+ if approxeq(B(i,o), 0)
+ %
+ else
+ label = strcat(label, sprintf('%s', chars(o)));
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/editGraphGUI.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/editGraphGUI.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function g
+%here is how one creates a function ("callback") which does something
+%(prints the node label) when you click on the node's text in Matlab figure.
+%
+% Leon Peshkin http://www.ai.mit.edu/~pesha
+%
+%draw_graph(...)
+
+ % "gca" is the current "axes" object, parent of all objects in figure
+ % "gcbo" is the handle of the object whose callback is being executed
+ % "findall" gives handles to all elements of a given type in the figure
+text_elms = findall(gca,'Type','text');
+for ndx = 1:length(text_elms)
+ callbk = 'my_call(str2num(get(gcbo,''String'')))';
+ set(text_elms(ndx), 'ButtonDownFcn', callbk); % assume the node label is a number
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/graph_to_dot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/graph_to_dot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+function graph_to_dot(adj, varargin)
+%GRAPH_TO_DOT Makes a GraphViz (AT&T) file representing an adjacency matrix
+% graph_to_dot(adj, ...) writes to the specified filename.
+%
+% Optional arguments can be passed as name/value pairs: [default]
+%
+% 'filename' - if omitted, writes to 'tmp.dot'
+% 'arc_label' - arc_label{i,j} is a string attached to the i-j arc [""]
+% 'node_label' - node_label{i} is a string attached to the node i ["i"]
+% 'width' - width in inches [10]
+% 'height' - height in inches [10]
+% 'leftright' - 1 means layout left-to-right, 0 means top-to-bottom [0]
+% 'directed' - 1 means use directed arcs, 0 means undirected [1]
+%
+% For details on graphviz, See http://www.research.att.com/sw/tools/graphviz
+%
+% See also dot_to_graph and draw_dot.
+
+% First version written by Kevin Murphy 2002.
+% Modified by Leon Peshkin, Jan 2004.
+% Bugfix by Tom Minka, Mar 2004.
+
+node_label = []; arc_label = []; % set default args
+width = 10; height = 10;
+leftright = 0; directed = 1; filename = 'tmp.dot';
+
+for i = 1:2:nargin-1 % get optional args
+ switch varargin{i}
+ case 'filename', filename = varargin{i+1};
+ case 'node_label', node_label = varargin{i+1};
+ case 'arc_label', arc_label = varargin{i+1};
+ case 'width', width = varargin{i+1};
+ case 'height', height = varargin{i+1};
+ case 'leftright', leftright = varargin{i+1};
+ case 'directed', directed = varargin{i+1};
+ end
+end
+% minka
+if ~directed
+ adj = triu(adj | adj');
+end
+
+fid = fopen(filename, 'w');
+if directed
+ fprintf(fid, 'digraph G {\n');
+ arctxt = '->';
+ if isempty(arc_label)
+ labeltxt = '';
+ else
+ labeltxt = '[label="%s"]';
+ end
+else
+ fprintf(fid, 'graph G {\n');
+ arctxt = '--';
+ if isempty(arc_label)
+ labeltxt = '[dir=none]';
+ else
+ labeltxt = '[label="%s",dir=none]';
+ end
+end
+edgeformat = strcat(['%d ',arctxt,' %d ',labeltxt,';\n']);
+fprintf(fid, 'center = 1;\n');
+fprintf(fid, 'size=\"%d,%d\";\n', width, height);
+if leftright
+ fprintf(fid, 'rankdir=LR;\n');
+end
+Nnds = length(adj);
+for node = 1:Nnds % process nodes
+ if isempty(node_label)
+ fprintf(fid, '%d;\n', node);
+ else
+ fprintf(fid, '%d [ label = "%s" ];\n', node, node_label{node});
+ end
+end
+for node1 = 1:Nnds % process edges
+ arcs = find(adj(node1,:)); % children(adj, node);
+ for node2 = arcs
+ if ~isempty(arc_label)
+ fprintf(fid, edgeformat,node1,node2,arc_label{node1,node2});
+ else
+ fprintf(fid, edgeformat, node1, node2);
+ end
+ end
+end
+fprintf(fid, '}');
+fclose(fid);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/make_layout.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/make_layout.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,170 @@
+function [x, y] = make_layout(adj)
+% MAKE_LAYOUT Creates a layout from an adjacency matrix
+%
+% [X, Y] = MAKE_LAYOUT(ADJ)
+%
+% Inputs :
+% ADJ = adjacency matrix (source, sink)
+%
+% Outputs :
+% X, Y : Positions of nodes
+%
+% Usage Example : [X, Y] = make_layout(adj);
+%
+%
+% Note : Uses some very simple heuristics, so any other
+% algorithm would create a nicer layout
+%
+% See also
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 13-Apr-2000 8:25 PM ATC Created under MATLAB 5.3.1.29215a (R11.1)
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+N = size(adj,1);
+tps = toposort(adj);
+
+if ~isempty(tps), % is directed ?
+ level = zeros(1,N);
+ for i=tps,
+ idx = find(adj(:,i));
+ if ~isempty(idx),
+ l = max(level(idx));
+ level(i)=l+1;
+ end;
+ end;
+else
+ level = poset(adj,1)'-1;
+end;
+
+y = (level+1)./(max(level)+2);
+y = 1-y;
+x = zeros(size(y));
+for i=0:max(level),
+ idx = find(level==i);
+ offset = (rem(i,2)-0.5)/10;
+ x(idx) = (1:length(idx))./(length(idx)+1)+offset;
+end;
+
+%%%%%%%
+
+function [depth] = poset(adj, root)
+% POSET Identify a partial ordering among the nodes of a graph
+%
+% [DEPTH] = POSET(ADJ,ROOT)
+%
+% Inputs :
+% ADJ : Adjacency Matrix
+% ROOT : Node to start with
+%
+% Outputs :
+% DEPTH : Depth of the Node
+%
+% Usage Example : [depth] = poset(adj,12);
+%
+%
+% Note : All Nodes must be connected
+% See also
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 17-Jun-1998 12:01 PM ATC Created under MATLAB 5.1.0.421
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+adj = adj+adj';
+
+N = size(adj,1);
+depth = zeros(N,1);
+depth(root) = 1;
+queue = root;
+
+while 1,
+ if isempty(queue),
+ if all(depth), break;
+ else
+ root = find(depth==0);
+ root = root(1);
+ depth(root) = 1;
+ queue = root;
+ end;
+ end;
+ r = queue(1); queue(1) = [];
+ idx = find(adj(r,:));
+ idx2 = find(~depth(idx));
+ idx = idx(idx2);
+ queue = [queue idx];
+ depth(idx) = depth(r)+1;
+end;
+
+%%%%%%%%%
+
+function [seq] = toposort(adj)
+% TOPOSORT A Topological ordering of nodes in a directed graph
+%
+% [SEQ] = TOPOSORT(ADJ)
+%
+% Inputs :
+% ADJ : Adjacency Matrix.
+% ADJ(i,j)==1 ==> there exists a directed edge
+% from i to j
+%
+% Outputs :
+% SEQ : A topological ordered sequence of nodes.
+% empty matrix if graph contains cycles.
+%
+% Usage Example :
+% N=5;
+% [l,u] = lu(rand(N));
+% adj = ~diag(ones(1,N)) & u>0.5;
+% seq = toposort(adj);
+%
+%
+% Note :
+% See also
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 18-May-1998 4:44 PM ATC Created under MATLAB 5.1.0.421
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+N = size(adj,1);
+indeg = sum(adj,1);
+outdeg = sum(adj,2);
+seq = [];
+
+for i=1:N,
+ % Find nodes with indegree 0
+ idx = find(indeg==0);
+ % If can't find than graph contains a cycle
+ if isempty(idx),
+ seq = [];
+ break;
+ end;
+ % Remove the node with the max number of connections
+ [dummy idx2] = max(outdeg(idx));
+ indx = idx(idx2);
+ seq = [seq, indx];
+ indeg(indx)=-1;
+ idx = find(adj(indx,:));
+ indeg(idx) = indeg(idx)-1;
+end;
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/my_call.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/my_call.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function my_call(value)
+fprintf('%d \n', value); % might check here whether this is a label at all
+ % since we get here by clicking on ANY text in figure
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/GraphViz/process_options.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/GraphViz/process_options.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,132 @@
+% PROCESS_OPTIONS - Processes options passed to a Matlab function.
+% This function provides a simple means of
+% parsing attribute-value options. Each option is
+% named by a unique string and is given a default
+% value.
+%
+% Usage: [var1, var2, ..., varn[, unused]] = ...
+% process_options(args, ...
+% str1, def1, str2, def2, ..., strn, defn)
+%
+% Arguments:
+% args - a cell array of input arguments, such
+% as that provided by VARARGIN. Its contents
+% should alternate between strings and
+% values.
+% str1, ..., strn - Strings that are associated with a
+% particular variable
+% def1, ..., defn - Default values returned if no option
+% is supplied
+%
+% Returns:
+% var1, ..., varn - values to be assigned to variables
+% unused - an optional cell array of those
+% string-value pairs that were unused;
+% if this is not supplied, then a
+% warning will be issued for each
+% option in args that lacked a match.
+%
+% Examples:
+%
+% Suppose we wish to define a Matlab function 'func' that has
+% required parameters x and y, and optional arguments 'u' and 'v'.
+% With the definition
+%
+% function y = func(x, y, varargin)
+%
+% [u, v] = process_options(varargin, 'u', 0, 'v', 1);
+%
+% calling func(0, 1, 'v', 2) will assign 0 to x, 1 to y, 0 to u, and 2
+% to v. The parameter names are insensitive to case; calling
+% func(0, 1, 'V', 2) has the same effect. The function call
+%
+% func(0, 1, 'u', 5, 'z', 2);
+%
+% will result in u having the value 5 and v having value 1, but
+% will issue a warning that the 'z' option has not been used. On
+% the other hand, if func is defined as
+%
+% function y = func(x, y, varargin)
+%
+% [u, v, unused_args] = process_options(varargin, 'u', 0, 'v', 1);
+%
+% then the call func(0, 1, 'u', 5, 'z', 2) will yield no warning,
+% and unused_args will have the value {'z', 2}. This behaviour is
+% useful for functions with options that invoke other functions
+% with options; all options can be passed to the outer function and
+% its unprocessed arguments can be passed to the inner function.
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [varargout] = process_options(args, varargin)
+
+% Check the number of input arguments
+n = length(varargin);
+if (mod(n, 2))
+ error('Each option must be a string/value pair.');
+end
+
+% Check the number of supplied output arguments
+if (nargout < (n / 2))
+ error('Insufficient number of output arguments given');
+elseif (nargout == (n / 2))
+ warn = 1;
+ nout = n / 2;
+else
+ warn = 0;
+ nout = n / 2 + 1;
+end
+
+% Set outputs to be defaults
+varargout = cell(1, nout);
+for i=2:2:n
+ varargout{i/2} = varargin{i};
+end
+
+% Now process all arguments
+nunused = 0;
+for i=1:2:length(args)
+ found = 0;
+ for j=1:2:n
+ if strcmpi(args{i}, varargin{j})
+ varargout{(j + 1)/2} = args{i + 1};
+ found = 1;
+ break;
+ end
+ end
+ if (~found)
+ if (warn)
+ warning(sprintf('Option ''%s'' not used.', args{i}));
+ args{i}
+ else
+ nunused = nunused + 1;
+ unused{2 * nunused - 1} = args{i};
+ unused{2 * nunused} = args{i + 1};
+ end
+ end
+end
+
+% Assign the unused arguments
+if (~warn)
+ if (nunused)
+ varargout{nout} = unused;
+ else
+ varargout{nout} = cell(0);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+/README.txt/1.1.1.1/Thu Jun 9 01:22:48 2005//
+/dhmm_em.m/1.1.1.1/Thu Jun 9 01:25:04 2005//
+/dhmm_em_demo.m/1.1.1.1/Sun May 4 22:01:12 2003//
+/dhmm_em_online.m/1.1.1.1/Sun May 4 22:02:58 2003//
+/dhmm_em_online_demo.m/1.1.1.1/Sun May 4 22:04:10 2003//
+/dhmm_logprob.m/1.1.1.1/Sun May 4 22:01:34 2003//
+/dhmm_logprob_brute_force.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dhmm_logprob_path.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dhmm_sample.m/1.1.1.1/Mon May 31 22:19:50 2004//
+/dhmm_sample_endstate.m/1.1.1.1/Sun May 4 22:00:34 2003//
+/fixed_lag_smoother.m/1.1.1.1/Wed Jan 22 17:56:04 2003//
+/fixed_lag_smoother_demo.m/1.1.1.1/Thu Jun 9 01:27:20 2005//
+/fwdback.m/1.1.1.1/Thu Jun 9 01:17:50 2005//
+/gausshmm_train_observed.m/1.1.1.1/Thu Feb 12 23:08:22 2004//
+/mc_sample.m/1.1.1.1/Mon May 24 22:26:34 2004//
+/mc_sample_endstate.m/1.1.1.1/Wed Jan 22 20:32:28 2003//
+/mdp_sample.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/mhmmParzen_train_observed.m/1.1.1.1/Sat Feb 14 02:06:30 2004//
+/mhmm_em.m/1.1.1.1/Sun Feb 8 04:52:42 2004//
+/mhmm_em_demo.m/1.1.1.1/Tue May 13 16:11:22 2003//
+/mhmm_logprob.m/1.1.1.1/Sun May 4 22:11:54 2003//
+/mhmm_sample.m/1.1.1.1/Wed May 26 00:32:28 2004//
+/mk_leftright_transmat.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/mk_rightleft_transmat.m/1.1.1.1/Fri Nov 22 21:45:52 2002//
+/pomdp_sample.m/1.1.1.1/Sun May 4 21:58:20 2003//
+/testHMM.m/1.1.1.1/Thu Jun 9 01:25:50 2005//
+/transmat_train_observed.m/1.1.1.1/Sun Aug 29 12:41:52 2004//
+/viterbi_path.m/1.1.1.1/Sat Oct 23 01:18:22 2004//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/HMM
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+Hidden Markov Model (HMM) Toolbox written by Kevin Murphy (1998).
+See http://www.ai.mit.edu/~murphyk/Software/hmm.html for details.
+
+Models
+------
+
+dhmm = HMM with discrete output
+mhmm = HMM with mixture of Gaussians output;
+ Use mhmm with M=1 components to simulate an HMM with a single Gaussian output.
+
+Demos
+-----
+
+mhmm_em_demo
+dhmm_em_demo
+dhmm_em_online_demo
+fixed_lag_smoother_demo
+
+References
+-----------
+
+See "A tutorial on Hidden Markov Models and selected applications in speech recognition",
+ L. Rabiner, 1989, Proc. IEEE 77(2):257--286.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_em.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,124 @@
+function [LL, prior, transmat, obsmat, nrIterations] = ...
+ dhmm_em(data, prior, transmat, obsmat, varargin)
+% LEARN_DHMM Find the ML/MAP parameters of an HMM with discrete outputs using EM.
+% [ll_trace, prior, transmat, obsmat, iterNr] = learn_dhmm(data, prior0, transmat0, obsmat0, ...)
+%
+% Notation: Q(t) = hidden state, Y(t) = observation
+%
+% INPUTS:
+% data{ex} or data(ex,:) if all sequences have the same length
+% prior(i)
+% transmat(i,j)
+% obsmat(i,o)
+%
+% Optional parameters may be passed as 'param_name', param_value pairs.
+% Parameter names are shown below; default values in [] - if none, argument is mandatory.
+%
+% 'max_iter' - max number of EM iterations [10]
+% 'thresh' - convergence threshold [1e-4]
+% 'verbose' - if 1, print out loglik at every iteration [1]
+% 'obs_prior_weight' - weight to apply to uniform dirichlet prior on observation matrix [0]
+%
+% To clamp some of the parameters, so learning does not change them:
+% 'adj_prior' - if 0, do not change prior [1]
+% 'adj_trans' - if 0, do not change transmat [1]
+% 'adj_obs' - if 0, do not change obsmat [1]
+%
+% Modified by Herbert Jaeger so xi are not computed individually
+% but only their sum (over time) as xi_summed; this is the only way how they are used
+% and it saves a lot of memory.
+
+[max_iter, thresh, verbose, obs_prior_weight, adj_prior, adj_trans, adj_obs] = ...
+ process_options(varargin, 'max_iter', 10, 'thresh', 1e-4, 'verbose', 1, ...
+ 'obs_prior_weight', 0, 'adj_prior', 1, 'adj_trans', 1, 'adj_obs', 1);
+
+previous_loglik = -inf;
+loglik = 0;
+converged = 0;
+num_iter = 1;
+LL = [];
+
+if ~iscell(data)
+ data = num2cell(data, 2); % each row gets its own cell
+end
+
+while (num_iter <= max_iter) & ~converged
+ % E step
+ [loglik, exp_num_trans, exp_num_visits1, exp_num_emit] = ...
+ compute_ess_dhmm(prior, transmat, obsmat, data, obs_prior_weight);
+
+ % M step
+ if adj_prior
+ prior = normalise(exp_num_visits1);
+ end
+ if adj_trans & ~isempty(exp_num_trans)
+ transmat = mk_stochastic(exp_num_trans);
+ end
+ if adj_obs
+ obsmat = mk_stochastic(exp_num_emit);
+ end
+
+ if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end
+ num_iter = num_iter + 1;
+ converged = em_converged(loglik, previous_loglik, thresh);
+ previous_loglik = loglik;
+ LL = [LL loglik];
+end
+nrIterations = num_iter - 1;
+
+%%%%%%%%%%%%%%%%%%%%%%%
+
+function [loglik, exp_num_trans, exp_num_visits1, exp_num_emit, exp_num_visitsT] = ...
+ compute_ess_dhmm(startprob, transmat, obsmat, data, dirichlet)
+% COMPUTE_ESS_DHMM Compute the Expected Sufficient Statistics for an HMM with discrete outputs
+% function [loglik, exp_num_trans, exp_num_visits1, exp_num_emit, exp_num_visitsT] = ...
+% compute_ess_dhmm(startprob, transmat, obsmat, data, dirichlet)
+%
+% INPUTS:
+% startprob(i)
+% transmat(i,j)
+% obsmat(i,o)
+% data{seq}(t)
+% dirichlet - weighting term for uniform dirichlet prior on expected emissions
+%
+% OUTPUTS:
+% exp_num_trans(i,j) = sum_l sum_{t=2}^T Pr(X(t-1) = i, X(t) = j| Obs(l))
+% exp_num_visits1(i) = sum_l Pr(X(1)=i | Obs(l))
+% exp_num_visitsT(i) = sum_l Pr(X(T)=i | Obs(l))
+% exp_num_emit(i,o) = sum_l sum_{t=1}^T Pr(X(t) = i, O(t)=o| Obs(l))
+% where Obs(l) = O_1 .. O_T for sequence l.
+
+numex = length(data);
+[S O] = size(obsmat);
+exp_num_trans = zeros(S,S);
+exp_num_visits1 = zeros(S,1);
+exp_num_visitsT = zeros(S,1);
+exp_num_emit = dirichlet*ones(S,O);
+loglik = 0;
+
+for ex=1:numex
+ obs = data{ex};
+ T = length(obs);
+ %obslik = eval_pdf_cond_multinomial(obs, obsmat);
+ obslik = multinomial_prob(obs, obsmat);
+ [alpha, beta, gamma, current_ll, xi_summed] = fwdback(startprob, transmat, obslik);
+
+ loglik = loglik + current_ll;
+ exp_num_trans = exp_num_trans + xi_summed;
+ exp_num_visits1 = exp_num_visits1 + gamma(:,1);
+ exp_num_visitsT = exp_num_visitsT + gamma(:,T);
+ % loop over whichever is shorter
+ if T < O
+ for t=1:T
+ o = obs(t);
+ exp_num_emit(:,o) = exp_num_emit(:,o) + gamma(:,t);
+ end
+ else
+ for o=1:O
+ ndx = find(obs==o);
+ if ~isempty(ndx)
+ exp_num_emit(:,o) = exp_num_emit(:,o) + sum(gamma(:, ndx), 2);
+ end
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_em_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+O = 3;
+Q = 2;
+
+% "true" parameters
+prior0 = normalise(rand(Q,1));
+transmat0 = mk_stochastic(rand(Q,Q));
+obsmat0 = mk_stochastic(rand(Q,O));
+
+% training data
+T = 5;
+nex = 10;
+data = dhmm_sample(prior0, transmat0, obsmat0, T, nex);
+
+% initial guess of parameters
+prior1 = normalise(rand(Q,1));
+transmat1 = mk_stochastic(rand(Q,Q));
+obsmat1 = mk_stochastic(rand(Q,O));
+
+% improve guess of parameters using EM
+[LL, prior2, transmat2, obsmat2] = dhmm_em(data, prior1, transmat1, obsmat1, 'max_iter', 5);
+LL
+
+% use model to compute log likelihood
+loglik = dhmm_logprob(data, prior2, transmat2, obsmat2)
+% log lik is slightly different than LL(end), since it is computed after the final M step
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+function [transmat, obsmat, exp_num_trans, exp_num_emit, gamma, ll] = dhmm_em_online(...
+ prior, transmat, obsmat, exp_num_trans, exp_num_emit, decay, data, ...
+ act, adj_trans, adj_obs, dirichlet, filter_only)
+% ONLINE_EM Adjust the parameters using a weighted combination of the old and new expected statistics
+%
+% [transmat, obsmat, exp_num_trans, exp_num_emit, gamma, ll] = online_em(...
+% prior, transmat, obsmat, exp_num_trans, exp_num_emit, decay, data, act, ...
+% adj_trans, adj_obs, dirichlet, filter_only)
+%
+% 0 < decay < 1, with smaller values meaning the past is forgotten more quickly.
+% (We need to decay the old ess, since they were based on out-of-date parameters.)
+% The other params are as in learn_hmm.
+% We do a single forwards-backwards pass on the provided data, initializing with the specified prior.
+% (If filter_only = 1, we only do a forwards pass.)
+
+if ~exist('act'), act = []; end
+if ~exist('adj_trans'), adj_trans = 1; end
+if ~exist('adj_obs'), adj_obs = 1; end
+if ~exist('dirichlet'), dirichlet = 0; end
+if ~exist('filter_only'), filter_only = 0; end
+
+% E step
+olikseq = multinomial_prob(data, obsmat);
+if isempty(act)
+ [alpha, beta, gamma, ll, xi] = fwdback(prior, transmat, olikseq, 'fwd_only', filter_only);
+else
+ [alpha, beta, gamma, ll, xi] = fwdback(prior, transmat, olikseq, 'fwd_only', filter_only, ...
+ 'act', act);
+end
+
+% Increment ESS
+[S O] = size(obsmat);
+if adj_obs
+ exp_num_emit = decay*exp_num_emit + dirichlet*ones(S,O);
+ T = length(data);
+ if T < O
+ for t=1:T
+ o = data(t);
+ exp_num_emit(:,o) = exp_num_emit(:,o) + gamma(:,t);
+ end
+ else
+ for o=1:O
+ ndx = find(data==o);
+ if ~isempty(ndx)
+ exp_num_emit(:,o) = exp_num_emit(:,o) + sum(gamma(:, ndx), 2);
+ end
+ end
+ end
+end
+
+if adj_trans & (T > 1)
+ if isempty(act)
+ exp_num_trans = decay*exp_num_trans + sum(xi,3);
+ else
+ % act(2) determines Q(2), xi(:,:,1) holds P(Q(1), Q(2))
+ A = length(transmat);
+ for a=1:A
+ ndx = find(act(2:end)==a);
+ if ~isempty(ndx)
+ exp_num_trans{a} = decay*exp_num_trans{a} + sum(xi(:,:,ndx), 3);
+ end
+ end
+ end
+end
+
+
+% M step
+
+if adj_obs
+ obsmat = mk_stochastic(exp_num_emit);
+end
+if adj_trans & (T>1)
+ if isempty(act)
+ transmat = mk_stochastic(exp_num_trans);
+ else
+ for a=1:A
+ transmat{a} = mk_stochastic(exp_num_trans{a});
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,93 @@
+% Example of online EM applied to a simple POMDP with fixed action seq
+
+clear all
+
+% Create a really easy model to learn
+rand('state', 1);
+O = 2;
+S = 2;
+A = 2;
+prior0 = [1 0]';
+transmat0 = cell(1,A);
+transmat0{1} = [0.9 0.1; 0.1 0.9]; % long runs of 1s and 2s
+transmat0{2} = [0.1 0.9; 0.9 0.1]; % short runs
+obsmat0 = eye(2);
+
+%prior0 = normalise(rand(S,1));
+%transmat0 = mk_stochastic(rand(S,S));
+%obsmat0 = mk_stochastic(rand(S,O));
+
+T = 10;
+act = [1*ones(1,25) 2*ones(1,25) 1*ones(1,25) 2*ones(1,25)];
+data = pomdp_sample(prior0, transmat0, obsmat0, act);
+%data = sample_dhmm(prior0, transmat0, obsmat0, T, 1);
+
+% Initial guess of params
+rand('state', 2); % different seed!
+transmat1 = cell(1,A);
+for a=1:A
+ transmat1{a} = mk_stochastic(rand(S,S));
+end
+obsmat1 = mk_stochastic(rand(S,O));
+prior1 = prior0; % so it labels states the same way
+
+% Uninformative Dirichlet prior (expected sufficient statistics / pseudo counts)
+e = 0.001;
+ess_trans = cell(1,A);
+for a=1:A
+ ess_trans{a} = repmat(e, S, S);
+end
+ess_emit = repmat(e, S, O);
+
+% Params
+w = 2;
+decay_sched = [0.1:0.1:0.9];
+
+% Initialize
+LL1 = zeros(1,T);
+t = 1;
+y = data(t);
+data_win = y;
+act_win = [1]; % arbitrary initial value
+[prior1, LL1(1)] = normalise(prior1 .* obsmat1(:,y));
+
+% Iterate
+for t=2:T
+ y = data(t);
+ a = act(t);
+ if t <= w
+ data_win = [data_win y];
+ act_win = [act_win a];
+ else
+ data_win = [data_win(2:end) y];
+ act_win = [act_win(2:end) a];
+ prior1 = gamma(:, 2);
+ end
+ d = decay_sched(min(t, length(decay_sched)));
+ [transmat1, obsmat1, ess_trans, ess_emit, gamma, ll] = dhmm_em_online(...
+ prior1, transmat1, obsmat1, ess_trans, ess_emit, d, data_win, act_win);
+ bel = gamma(:, end);
+ LL1(t) = ll/length(data_win);
+ %fprintf('t=%d, ll=%f\n', t, ll);
+end
+
+LL1(1) = LL1(2); % since initial likelihood is for 1 slice
+plot(1:T, LL1, 'rx-');
+
+
+% compare with offline learning
+
+if 0
+rand('state', 2); % same seed as online learner
+transmat2 = cell(1,A);
+for a=1:A
+ transmat2{a} = mk_stochastic(rand(S,S));
+end
+obsmat2 = mk_stochastic(rand(S,O));
+prior2 = prior0;
+[LL2, prior2, transmat2, obsmat2] = dhmm_em(data, prior2, transmat2, obsmat2, ....
+ 'max_iter', 10, 'thresh', 1e-3, 'verbose', 1, 'act', act);
+
+LL2 = LL2 / T
+
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [loglik, errors] = dhmm_logprob(data, prior, transmat, obsmat)
+% LOG_LIK_DHMM Compute the log-likelihood of a dataset using a discrete HMM
+% [loglik, errors] = log_lik_dhmm(data, prior, transmat, obsmat)
+%
+% data{m} or data(m,:) is the m'th sequence
+% errors is a list of the cases which received a loglik of -infinity
+
+if ~iscell(data)
+ data = num2cell(data, 2);
+end
+ncases = length(data);
+
+loglik = 0;
+errors = [];
+for m=1:ncases
+ obslik = multinomial_prob(data{m}, obsmat);
+ [alpha, beta, gamma, ll] = fwdback(prior, transmat, obslik, 'fwd_only', 1);
+ if ll==-inf
+ errors = [errors m];
+ end
+ loglik = loglik + ll;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_brute_force.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_brute_force.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function logp = enumerate_HMM_loglik(prior, transmat, obsmat)
+% ENUMERATE_HMM_LOGLIK Compute the log likelihood of a sequence by exhaustive (O(Q^T)) enumeration.
+% logp = enumerate_HMM_loglik(prior, transmat, obsmat)
+%
+% Inputs:
+% prior(i) = Pr(Q(1) = i)
+% transmat(i,j) = Pr(Q(t+1)=j | Q(t)=i)
+% obsmat(i,t) = Pr(y(t) | Q(t)=i)
+
+Q = length(prior);
+T = size(obsmat, 2);
+sizes = repmat(Q, 1, T);
+
+psum = 0;
+for i=1:Q^T
+ qs = ind2subv(sizes, i); % make the state sequence
+ psum = psum + prob_path(prior, transmat, obsmat, qs);
+end
+logp = log(psum)
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_path.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_path.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function [ll, p] = prob_path(prior, transmat, obsmat, qs)
+% PROB_PATH Compute the prob. of a specific path (state sequence) through an HMM.
+% [ll, p] = prob_path(prior, transmat, obsmat, states)
+%
+% ll = log prob path
+% p(t) = Pr(O(t)) * Pr(Q(t) -> Q(t+1)) for 1<=t= 2 is the desired window width.
+% Actually, we use d=min(d, t0), where t0 is the current time.
+%
+% alpha(:, t0-d:t0-1) - length d window, excluding t0 (Columns indexed 1..d)
+% obslik(:, t0-d:t0-1) - length d window
+% obsvec - likelihood vector for current observation
+% transmat - transition matrix
+% If we specify the optional 'act' argument, transmat{a} should be a cell array, and
+% act(t0-d:t0) - length d window, last column = current action
+%
+% Output:
+% alpha(:, t0-d+1:t0) - last column = new filtered estimate
+% obslik(:, t0-d+1:t0) - last column = obsvec
+% xi(:, :, t0-d+1:t0-1) - 2 slice smoothed window
+% gamma(:, t0-d+1:t0) - smoothed window
+%
+% As usual, we define (using T=d)
+% alpha(i,t) = Pr(Q(t)=i | Y(1:t))
+% gamma(i,t) = Pr(Q(t)=i | Y(1:T))
+% xi(i,j,t) = Pr(Q(t)=i, Q(t+1)=j | Y(1:T))
+%
+% obslik(i,t) = Pr(Y(t) | Q(t)=i)
+% transmat{a}(i,j) = Pr(Q(t)=j | Q(t-1)=i, A(t)=a)
+
+[S n] = size(alpha);
+d = min(d, n+1);
+if d < 2
+ error('must keep a window of length at least 2');
+end
+
+if ~exist('act')
+ act = ones(1, n+1);
+ transmat = { transmat };
+end
+
+% pluck out last d-1 components from the history
+alpha = alpha(:, n-d+2:n);
+obslik = obslik(:, n-d+2:n);
+
+% Extend window by 1
+t = d;
+obslik(:,t) = obsvec;
+xi = zeros(S, S, d-1);
+xi(:,:,t-1) = normalise((alpha(:,t-1) * obslik(:,t)') .* transmat{act(t)});
+alpha(:,t) = sum(xi(:,:,t-1), 1)';
+
+% Now smooth backwards inside the window
+beta = ones(S, d);
+T = d;
+%fprintf('smooth from %d to 1, i.e., %d to %d\n', d, t0, t0-d+1);
+gamma(:,T) = alpha(:,T);
+for t=T-1:-1:1
+ b = beta(:,t+1) .* obslik(:,t+1);
+ beta(:,t) = normalise(transmat{act(t)} * b);
+ gamma(:,t) = normalise(alpha(:,t) .* beta(:,t));
+ xi(:,:,t) = normalise((transmat{act(t)} .* (alpha(:,t) * b')));
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/fixed_lag_smoother_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/fixed_lag_smoother_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+% Example of fixed lag smoothing
+
+rand('state', 1);
+S = 2;
+O = 2;
+T = 7;
+data = sample_discrete([0.5 0.5], 1, T);
+transmat = mk_stochastic(rand(S,S));
+obsmat = mk_stochastic(rand(S,O));
+obslik = multinomial_prob(data, obsmat);
+prior = [0.5 0.5]';
+
+
+[alpha0, beta0, gamma0, ll0, xi0] = fwdback(prior, transmat, obslik);
+
+w = 3;
+alpha1 = zeros(S, T);
+gamma1 = zeros(S, T);
+xi1 = zeros(S, S, T-1);
+t = 1;
+b = obsmat(:, data(t));
+olik_win = b; % window of conditional observation likelihoods
+alpha_win = normalise(prior .* b);
+alpha1(:,t) = alpha_win;
+for t=2:T
+ [alpha_win, olik_win, gamma_win, xi_win] = ...
+ fixed_lag_smoother(w, alpha_win, olik_win, obsmat(:, data(t)), transmat);
+ alpha1(:,max(1,t-w+1):t) = alpha_win;
+ gamma1(:,max(1,t-w+1):t) = gamma_win;
+ xi1(:,:,max(1,t-w+1):t-1) = xi_win;
+end
+
+e = 1e-1;
+%assert(approxeq(alpha0, alpha1, e));
+assert(approxeq(gamma0(:, T-w+1:end), gamma1(:, T-w+1:end), e));
+%assert(approxeq(xi0(:,:,T-w+1:end), xi1(:,:,T-w+1:end), e));
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/fwdback.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/fwdback.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,197 @@
+function [alpha, beta, gamma, loglik, xi_summed, gamma2] = fwdback(init_state_distrib, ...
+ transmat, obslik, varargin)
+% FWDBACK Compute the posterior probs. in an HMM using the forwards backwards algo.
+%
+% [alpha, beta, gamma, loglik, xi, gamma2] = fwdback(init_state_distrib, transmat, obslik, ...)
+%
+% Notation:
+% Y(t) = observation, Q(t) = hidden state, M(t) = mixture variable (for MOG outputs)
+% A(t) = discrete input (action) (for POMDP models)
+%
+% INPUT:
+% init_state_distrib(i) = Pr(Q(1) = i)
+% transmat(i,j) = Pr(Q(t) = j | Q(t-1)=i)
+% or transmat{a}(i,j) = Pr(Q(t) = j | Q(t-1)=i, A(t-1)=a) if there are discrete inputs
+% obslik(i,t) = Pr(Y(t)| Q(t)=i)
+% (Compute obslik using eval_pdf_xxx on your data sequence first.)
+%
+% Optional parameters may be passed as 'param_name', param_value pairs.
+% Parameter names are shown below; default values in [] - if none, argument is mandatory.
+%
+% For HMMs with MOG outputs: if you want to compute gamma2, you must specify
+% 'obslik2' - obslik(i,j,t) = Pr(Y(t)| Q(t)=i,M(t)=j) []
+% 'mixmat' - mixmat(i,j) = Pr(M(t) = j | Q(t)=i) []
+%
+% For HMMs with discrete inputs:
+% 'act' - act(t) = action performed at step t
+%
+% Optional arguments:
+% 'fwd_only' - if 1, only do a forwards pass and set beta=[], gamma2=[] [0]
+% 'scaled' - if 1, normalize alphas and betas to prevent underflow [1]
+% 'maximize' - if 1, use max-product instead of sum-product [0]
+%
+% OUTPUTS:
+% alpha(i,t) = p(Q(t)=i | y(1:t)) (or p(Q(t)=i, y(1:t)) if scaled=0)
+% beta(i,t) = p(y(t+1:T) | Q(t)=i)*p(y(t+1:T)|y(1:t)) (or p(y(t+1:T) | Q(t)=i) if scaled=0)
+% gamma(i,t) = p(Q(t)=i | y(1:T))
+% loglik = log p(y(1:T))
+% xi(i,j,t-1) = p(Q(t-1)=i, Q(t)=j | y(1:T)) - NO LONGER COMPUTED
+% xi_summed(i,j) = sum_{t=1}^{T-1} xi(i,j,t) - change made by Herbert Jaeger
+% gamma2(j,k,t) = p(Q(t)=j, M(t)=k | y(1:T)) (only for MOG outputs)
+%
+% If fwd_only = 1, these become
+% alpha(i,t) = p(Q(t)=i | y(1:t))
+% beta = []
+% gamma(i,t) = p(Q(t)=i | y(1:t))
+% xi(i,j,t-1) = p(Q(t-1)=i, Q(t)=j | y(1:t))
+% gamma2 = []
+%
+% Note: we only compute xi if it is requested as a return argument, since it can be very large.
+% Similarly, we only compute gamma2 on request (and if using MOG outputs).
+%
+% Examples:
+%
+% [alpha, beta, gamma, loglik] = fwdback(pi, A, multinomial_prob(sequence, B));
+%
+% [B, B2] = mixgauss_prob(data, mu, Sigma, mixmat);
+% [alpha, beta, gamma, loglik, xi, gamma2] = fwdback(pi, A, B, 'obslik2', B2, 'mixmat', mixmat);
+
+if nargout >= 5, compute_xi = 1; else compute_xi = 0; end
+if nargout >= 6, compute_gamma2 = 1; else compute_gamma2 = 0; end
+
+[obslik2, mixmat, fwd_only, scaled, act, maximize, compute_xi, compute_gamma2] = ...
+ process_options(varargin, ...
+ 'obslik2', [], 'mixmat', [], ...
+ 'fwd_only', 0, 'scaled', 1, 'act', [], 'maximize', 0, ...
+ 'compute_xi', compute_xi, 'compute_gamma2', compute_gamma2);
+
+[Q T] = size(obslik);
+
+if isempty(obslik2)
+ compute_gamma2 = 0;
+end
+
+if isempty(act)
+ act = ones(1,T);
+ transmat = { transmat } ;
+end
+
+scale = ones(1,T);
+
+% scale(t) = Pr(O(t) | O(1:t-1)) = 1/c(t) as defined by Rabiner (1989).
+% Hence prod_t scale(t) = Pr(O(1)) Pr(O(2)|O(1)) Pr(O(3) | O(1:2)) ... = Pr(O(1), ... ,O(T))
+% or log P = sum_t log scale(t).
+% Rabiner suggests multiplying beta(t) by scale(t), but we can instead
+% normalise beta(t) - the constants will cancel when we compute gamma.
+
+loglik = 0;
+
+alpha = zeros(Q,T);
+gamma = zeros(Q,T);
+if compute_xi
+ xi_summed = zeros(Q,Q);
+else
+ xi_summed = [];
+end
+
+%%%%%%%%% Forwards %%%%%%%%%%
+
+t = 1;
+alpha(:,1) = init_state_distrib(:) .* obslik(:,t);
+if scaled
+ %[alpha(:,t), scale(t)] = normaliseC(alpha(:,t));
+ [alpha(:,t), scale(t)] = normalise(alpha(:,t));
+end
+assert(approxeq(sum(alpha(:,t)),1))
+for t=2:T
+ %trans = transmat(:,:,act(t-1))';
+ trans = transmat{act(t-1)};
+ if maximize
+ m = max_mult(trans', alpha(:,t-1));
+ %A = repmat(alpha(:,t-1), [1 Q]);
+ %m = max(trans .* A, [], 1);
+ else
+ m = trans' * alpha(:,t-1);
+ end
+ alpha(:,t) = m(:) .* obslik(:,t);
+ if scaled
+ %[alpha(:,t), scale(t)] = normaliseC(alpha(:,t));
+ [alpha(:,t), scale(t)] = normalise(alpha(:,t));
+ end
+ if compute_xi & fwd_only % useful for online EM
+ %xi(:,:,t-1) = normaliseC((alpha(:,t-1) * obslik(:,t)') .* trans);
+ xi_summed = xi_summed + normalise((alpha(:,t-1) * obslik(:,t)') .* trans);
+ end
+ assert(approxeq(sum(alpha(:,t)),1))
+end
+if scaled
+ if any(scale==0)
+ loglik = -inf;
+ else
+ loglik = sum(log(scale));
+ end
+else
+ loglik = log(sum(alpha(:,T)));
+end
+
+if fwd_only
+ gamma = alpha;
+ beta = [];
+ gamma2 = [];
+ return;
+end
+
+%%%%%%%%% Backwards %%%%%%%%%%
+
+beta = zeros(Q,T);
+if compute_gamma2
+ M = size(mixmat, 2);
+ gamma2 = zeros(Q,M,T);
+else
+ gamma2 = [];
+end
+
+beta(:,T) = ones(Q,1);
+%gamma(:,T) = normaliseC(alpha(:,T) .* beta(:,T));
+gamma(:,T) = normalise(alpha(:,T) .* beta(:,T));
+t=T;
+if compute_gamma2
+ denom = obslik(:,t) + (obslik(:,t)==0); % replace 0s with 1s before dividing
+ gamma2(:,:,t) = obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M]) ./ repmat(denom, [1 M]);
+ %gamma2(:,:,t) = normaliseC(obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M])); % wrong!
+end
+for t=T-1:-1:1
+ b = beta(:,t+1) .* obslik(:,t+1);
+ %trans = transmat(:,:,act(t));
+ trans = transmat{act(t)};
+ if maximize
+ B = repmat(b(:)', Q, 1);
+ beta(:,t) = max(trans .* B, [], 2);
+ else
+ beta(:,t) = trans * b;
+ end
+ if scaled
+ %beta(:,t) = normaliseC(beta(:,t));
+ beta(:,t) = normalise(beta(:,t));
+ end
+ %gamma(:,t) = normaliseC(alpha(:,t) .* beta(:,t));
+ gamma(:,t) = normalise(alpha(:,t) .* beta(:,t));
+ if compute_xi
+ %xi(:,:,t) = normaliseC((trans .* (alpha(:,t) * b')));
+ xi_summed = xi_summed + normalise((trans .* (alpha(:,t) * b')));
+ end
+ if compute_gamma2
+ denom = obslik(:,t) + (obslik(:,t)==0); % replace 0s with 1s before dividing
+ gamma2(:,:,t) = obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M]) ./ repmat(denom, [1 M]);
+ %gamma2(:,:,t) = normaliseC(obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M]));
+ end
+end
+
+% We now explain the equation for gamma2
+% Let zt=y(1:t-1,t+1:T) be all observations except y(t)
+% gamma2(Q,M,t) = P(Qt,Mt|yt,zt) = P(yt|Qt,Mt,zt) P(Qt,Mt|zt) / P(yt|zt)
+% = P(yt|Qt,Mt) P(Mt|Qt) P(Qt|zt) / P(yt|zt)
+% Now gamma(Q,t) = P(Qt|yt,zt) = P(yt|Qt) P(Qt|zt) / P(yt|zt)
+% hence
+% P(Qt,Mt|yt,zt) = P(yt|Qt,Mt) P(Mt|Qt) [P(Qt|yt,zt) P(yt|zt) / P(yt|Qt)] / P(yt|zt)
+% = P(yt|Qt,Mt) P(Mt|Qt) P(Qt|yt,zt) / P(yt|Qt)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/gausshmm_train_observed.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/gausshmm_train_observed.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function [initState, transmat, mu, Sigma] = gausshmm_train_observed(obsData, hiddenData, ...
+ nstates, varargin)
+% GAUSSHMM_TRAIN_OBSERVED Estimate params of HMM with Gaussian output from fully observed sequences
+% [initState, transmat, mu, Sigma] = gausshmm_train_observed(obsData, hiddenData, nstates,...)
+%
+% INPUT
+% If all sequences have the same length
+% obsData(:,t,ex)
+% hiddenData(ex,t) - must be ROW vector if only one sequence
+% If sequences have different lengths, we use cell arrays
+% obsData{ex}(:,t)
+% hiddenData{ex}(t)
+%
+% Optional arguments
+% dirichletPriorWeight - for smoothing transition matrix counts
+%
+% Optional parameters from mixgauss_Mstep:
+% 'cov_type' - 'full', 'diag' or 'spherical' ['full']
+% 'tied_cov' - 1 (Sigma) or 0 (Sigma_i) [0]
+% 'clamped_cov' - pass in clamped value, or [] if unclamped [ [] ]
+% 'clamped_mean' - pass in clamped value, or [] if unclamped [ [] ]
+% 'cov_prior' - Lambda_i, added to YY(:,:,i) [0.01*eye(d,d,Q)]
+%
+% Output
+% mu(:,q)
+% Sigma(:,:,q)
+
+[dirichletPriorWeight, other] = process_options(...
+ varargin, 'dirichletPriorWeight', 0);
+
+[transmat, initState] = transmat_train_observed(hiddenData, nstates, ...
+ 'dirichletPriorWeight', dirichletPriorWeight);
+
+% convert to obsData(:,t*nex)
+if ~iscell(obsData)
+ [D T Nex] = size(obsData);
+ obsData = reshape(obsData, D, T*Nex);
+else
+ obsData = cat(2, obsData{:});
+ hiddenData = cat(2,hiddenData{:});
+end
+[mu, Sigma] = condgaussTrainObserved(obsData, hiddenData(:), nstates, varargin{:});
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mc_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mc_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function S = mc_sample(prior, trans, len, numex)
+% SAMPLE_MC Generate random sequences from a Markov chain.
+% STATE = SAMPLE_MC(PRIOR, TRANS, LEN) generates a sequence of length LEN.
+%
+% STATE = SAMPLE_MC(PRIOR, TRANS, LEN, N) generates N rows each of length LEN.
+
+if nargin==3
+ numex = 1;
+end
+
+S = zeros(numex,len);
+for i=1:numex
+ S(i, 1) = sample_discrete(prior);
+ for t=2:len
+ S(i, t) = sample_discrete(trans(S(i,t-1),:));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mc_sample_endstate.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mc_sample_endstate.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function S = sample_mc_endstate(startprob, trans, endprob)
+% SAMPLE_MC_ENDSTATE Generate a random sequence from a Markov chain until enter the endstate.
+% seq = sample_mc(startprob, trans, endprob)
+
+% add an end state
+Q = size(trans,1);
+transprob = zeros(Q,Q+1);
+end_state = Q+1;
+for i=1:Q
+ for j=1:Q
+ transprob(i,j) = (1-endprob(i)) * trans(i,j);
+ end
+ transprob(i,end_state) = endprob(i);
+ %assert(approxeq(sum(transprob(i,:)), 1))
+end
+
+S = [];
+S(1) = sample_discrete(startprob);
+t = 1;
+p = endprob(S(t));
+stop = (S(1) == end_state);
+while ~stop
+ S(t+1) = sample_discrete(transprob(S(t),:));
+ stop = (S(t+1) == end_state);
+ t = t + 1;
+end
+S = S(1:end-1); % don't include end state
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mdp_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mdp_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function state = sample_mdp(prior, trans, act)
+% SAMPLE_MDP Sample a sequence of states from a Markov Decision Process.
+% state = sample_mdp(prior, trans, act)
+%
+% Inputs:
+% prior(i) = Pr(Q(1)=i)
+% trans{a}(i,j) = Pr(Q(t)=j | Q(t-1)=i, A(t)=a)
+% act(a) = A(t), so act(1) is ignored
+%
+% Output:
+% state is a vector of length T=length(act)
+
+len = length(act);
+state = zeros(1,len);
+state(1) = sample_discrete(prior);
+for t=2:len
+ state(t) = sample_discrete(trans{act(t)}(state(t-1),:));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mhmmParzen_train_observed.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mhmmParzen_train_observed.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function [initState, transmat, mu, Nproto, pick] = mhmmParzen_train_observed(obsData, hiddenData, ...
+ nstates, maxNproto, varargin)
+% mhmmParzentrain_observed with mixture of Gaussian outputs from fully observed sequences
+% function [initState, transmat, mu, Nproto] = mhmm_train_observed_parzen(obsData, hiddenData, ...
+% nstates, maxNproto)
+%
+%
+% INPUT
+% If all sequences have the same length
+% obsData(:,t,ex)
+% hiddenData(ex,t) - must be ROW vector if only one sequence
+% If sequences have different lengths, we use cell arrays
+% obsData{ex}(:,t)
+% hiddenData{ex}(t)
+%
+% Optional arguments
+% dirichletPriorWeight - for smoothing transition matrix counts
+% mkSymmetric
+%
+% Output
+% mu(:,q)
+% Nproto(q) is the number of prototypes (mixture components) chosen for state q
+
+[transmat, initState] = transmat_train_observed(...
+ hiddenData, nstates, varargin{:});
+
+% convert to obsData(:,t*nex)
+if ~iscell(obsData)
+ [D T Nex] = size(obsData);
+ obsData = reshape(obsData, D, T*Nex);
+else
+ obsData = cat(2, obsData{:});
+ hiddenData = cat(2, hiddenData{:});
+end
+[mu, Nproto, pick] = parzen_fit_select_unif(obsData, hiddenData(:), maxNproto);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mhmm_em.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mhmm_em.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,173 @@
+function [LL, prior, transmat, mu, Sigma, mixmat] = ...
+ mhmm_em(data, prior, transmat, mu, Sigma, mixmat, varargin);
+% LEARN_MHMM Compute the ML parameters of an HMM with (mixtures of) Gaussians output using EM.
+% [ll_trace, prior, transmat, mu, sigma, mixmat] = learn_mhmm(data, ...
+% prior0, transmat0, mu0, sigma0, mixmat0, ...)
+%
+% Notation: Q(t) = hidden state, Y(t) = observation, M(t) = mixture variable
+%
+% INPUTS:
+% data{ex}(:,t) or data(:,t,ex) if all sequences have the same length
+% prior(i) = Pr(Q(1) = i),
+% transmat(i,j) = Pr(Q(t+1)=j | Q(t)=i)
+% mu(:,j,k) = E[Y(t) | Q(t)=j, M(t)=k ]
+% Sigma(:,:,j,k) = Cov[Y(t) | Q(t)=j, M(t)=k]
+% mixmat(j,k) = Pr(M(t)=k | Q(t)=j) : set to [] or ones(Q,1) if only one mixture component
+%
+% Optional parameters may be passed as 'param_name', param_value pairs.
+% Parameter names are shown below; default values in [] - if none, argument is mandatory.
+%
+% 'max_iter' - max number of EM iterations [10]
+% 'thresh' - convergence threshold [1e-4]
+% 'verbose' - if 1, print out loglik at every iteration [1]
+% 'cov_type' - 'full', 'diag' or 'spherical' ['full']
+%
+% To clamp some of the parameters, so learning does not change them:
+% 'adj_prior' - if 0, do not change prior [1]
+% 'adj_trans' - if 0, do not change transmat [1]
+% 'adj_mix' - if 0, do not change mixmat [1]
+% 'adj_mu' - if 0, do not change mu [1]
+% 'adj_Sigma' - if 0, do not change Sigma [1]
+%
+% If the number of mixture components differs depending on Q, just set the trailing
+% entries of mixmat to 0, e.g., 2 components if Q=1, 3 components if Q=2,
+% then set mixmat(1,3)=0. In this case, B2(1,3,:)=1.0.
+
+if ~isstr(varargin{1}) % catch old syntax
+ error('optional arguments should be passed as string/value pairs')
+end
+
+[max_iter, thresh, verbose, cov_type, adj_prior, adj_trans, adj_mix, adj_mu, adj_Sigma] = ...
+ process_options(varargin, 'max_iter', 10, 'thresh', 1e-4, 'verbose', 1, ...
+ 'cov_type', 'full', 'adj_prior', 1, 'adj_trans', 1, 'adj_mix', 1, ...
+ 'adj_mu', 1, 'adj_Sigma', 1);
+
+previous_loglik = -inf;
+loglik = 0;
+converged = 0;
+num_iter = 1;
+LL = [];
+
+if ~iscell(data)
+ data = num2cell(data, [1 2]); % each elt of the 3rd dim gets its own cell
+end
+numex = length(data);
+
+
+O = size(data{1},1);
+Q = length(prior);
+if isempty(mixmat)
+ mixmat = ones(Q,1);
+end
+M = size(mixmat,2);
+if M == 1
+ adj_mix = 0;
+end
+
+while (num_iter <= max_iter) & ~converged
+ % E step
+ [loglik, exp_num_trans, exp_num_visits1, postmix, m, ip, op] = ...
+ ess_mhmm(prior, transmat, mixmat, mu, Sigma, data);
+
+
+ % M step
+ if adj_prior
+ prior = normalise(exp_num_visits1);
+ end
+ if adj_trans
+ transmat = mk_stochastic(exp_num_trans);
+ end
+ if adj_mix
+ mixmat = mk_stochastic(postmix);
+ end
+ if adj_mu | adj_Sigma
+ [mu2, Sigma2] = mixgauss_Mstep(postmix, m, op, ip, 'cov_type', cov_type);
+ if adj_mu
+ mu = reshape(mu2, [O Q M]);
+ end
+ if adj_Sigma
+ Sigma = reshape(Sigma2, [O O Q M]);
+ end
+ end
+
+ if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end
+ num_iter = num_iter + 1;
+ converged = em_converged(loglik, previous_loglik, thresh);
+ previous_loglik = loglik;
+ LL = [LL loglik];
+end
+
+
+%%%%%%%%%
+
+function [loglik, exp_num_trans, exp_num_visits1, postmix, m, ip, op] = ...
+ ess_mhmm(prior, transmat, mixmat, mu, Sigma, data)
+% ESS_MHMM Compute the Expected Sufficient Statistics for a MOG Hidden Markov Model.
+%
+% Outputs:
+% exp_num_trans(i,j) = sum_l sum_{t=2}^T Pr(Q(t-1) = i, Q(t) = j| Obs(l))
+% exp_num_visits1(i) = sum_l Pr(Q(1)=i | Obs(l))
+%
+% Let w(i,k,t,l) = P(Q(t)=i, M(t)=k | Obs(l))
+% where Obs(l) = Obs(:,:,l) = O_1 .. O_T for sequence l
+% Then
+% postmix(i,k) = sum_l sum_t w(i,k,t,l) (posterior mixing weights/ responsibilities)
+% m(:,i,k) = sum_l sum_t w(i,k,t,l) * Obs(:,t,l)
+% ip(i,k) = sum_l sum_t w(i,k,t,l) * Obs(:,t,l)' * Obs(:,t,l)
+% op(:,:,i,k) = sum_l sum_t w(i,k,t,l) * Obs(:,t,l) * Obs(:,t,l)'
+
+
+verbose = 0;
+
+%[O T numex] = size(data);
+numex = length(data);
+O = size(data{1},1);
+Q = length(prior);
+M = size(mixmat,2);
+exp_num_trans = zeros(Q,Q);
+exp_num_visits1 = zeros(Q,1);
+postmix = zeros(Q,M);
+m = zeros(O,Q,M);
+op = zeros(O,O,Q,M);
+ip = zeros(Q,M);
+
+mix = (M>1);
+
+loglik = 0;
+if verbose, fprintf(1, 'forwards-backwards example # '); end
+for ex=1:numex
+ if verbose, fprintf(1, '%d ', ex); end
+ %obs = data(:,:,ex);
+ obs = data{ex};
+ T = size(obs,2);
+ if mix
+ [B, B2] = mixgauss_prob(obs, mu, Sigma, mixmat);
+ [alpha, beta, gamma, current_loglik, xi, gamma2] = ...
+ fwdback(prior, transmat, B, 'obslik2', B2, 'mixmat', mixmat);
+ else
+ B = mixgauss_prob(obs, mu, Sigma);
+ [alpha, beta, gamma, current_loglik, xi] = fwdback(prior, transmat, B);
+ end
+ loglik = loglik + current_loglik;
+ if verbose, fprintf(1, 'll at ex %d = %f\n', ex, loglik); end
+
+ exp_num_trans = exp_num_trans + sum(xi,3);
+ exp_num_visits1 = exp_num_visits1 + gamma(:,1);
+
+ if mix
+ postmix = postmix + sum(gamma2,3);
+ else
+ postmix = postmix + sum(gamma,2);
+ gamma2 = reshape(gamma, [Q 1 T]); % gamma2(i,m,t) = gamma(i,t)
+ end
+ for i=1:Q
+ for k=1:M
+ w = reshape(gamma2(i,k,:), [1 T]); % w(t) = w(i,k,t,l)
+ wobs = obs .* repmat(w, [O 1]); % wobs(:,t) = w(t) * obs(:,t)
+ m(:,i,k) = m(:,i,k) + sum(wobs, 2); % m(:) = sum_t w(t) obs(:,t)
+ op(:,:,i,k) = op(:,:,i,k) + wobs * obs'; % op(:,:) = sum_t w(t) * obs(:,t) * obs(:,t)'
+ ip(i,k) = ip(i,k) + sum(sum(wobs .* obs, 2)); % ip = sum_t w(t) * obs(:,t)' * obs(:,t)
+ end
+ end
+end
+if verbose, fprintf(1, '\n'); end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mhmm_em_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mhmm_em_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+if 1
+ O = 4;
+ T = 10;
+ nex = 50;
+ M = 2;
+ Q = 3;
+else
+ O = 8; %Number of coefficients in a vector
+ T = 420; %Number of vectors in a sequence
+ nex = 1; %Number of sequences
+ M = 1; %Number of mixtures
+ Q = 6; %Number of states
+end
+cov_type = 'full';
+
+data = randn(O,T,nex);
+
+% initial guess of parameters
+prior0 = normalise(rand(Q,1));
+transmat0 = mk_stochastic(rand(Q,Q));
+
+if 0
+ Sigma0 = repmat(eye(O), [1 1 Q M]);
+ % Initialize each mean to a random data point
+ indices = randperm(T*nex);
+ mu0 = reshape(data(:,indices(1:(Q*M))), [O Q M]);
+ mixmat0 = mk_stochastic(rand(Q,M));
+else
+ [mu0, Sigma0] = mixgauss_init(Q*M, data, cov_type);
+ mu0 = reshape(mu0, [O Q M]);
+ Sigma0 = reshape(Sigma0, [O O Q M]);
+ mixmat0 = mk_stochastic(rand(Q,M));
+end
+
+[LL, prior1, transmat1, mu1, Sigma1, mixmat1] = ...
+ mhmm_em(data, prior0, transmat0, mu0, Sigma0, mixmat0, 'max_iter', 5);
+
+
+loglik = mhmm_logprob(data, prior1, transmat1, mu1, Sigma1, mixmat1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mhmm_logprob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mhmm_logprob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [loglik, errors] = mhmm_logprob(data, prior, transmat, mu, Sigma, mixmat)
+% LOG_LIK_MHMM Compute the log-likelihood of a dataset using a (mixture of) Gaussians HMM
+% [loglik, errors] = log_lik_mhmm(data, prior, transmat, mu, sigma, mixmat)
+%
+% data{m}(:,t) or data(:,t,m) if all cases have same length
+% errors is a list of the cases which received a loglik of -infinity
+%
+% Set mixmat to ones(Q,1) or omit it if there is only 1 mixture component
+
+Q = length(prior);
+if size(mixmat,1) ~= Q % trap old syntax
+ error('mixmat should be QxM')
+end
+if nargin < 6, mixmat = ones(Q,1); end
+
+if ~iscell(data)
+ data = num2cell(data, [1 2]); % each elt of the 3rd dim gets its own cell
+end
+ncases = length(data);
+
+loglik = 0;
+errors = [];
+for m=1:ncases
+ obslik = mixgauss_prob(data{m}, mu, Sigma, mixmat);
+ [alpha, beta, gamma, ll] = fwdback(prior, transmat, obslik, 'fwd_only', 1);
+ if ll==-inf
+ errors = [errors m];
+ end
+ loglik = loglik + ll;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mhmm_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mhmm_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function [obs, hidden] = mhmm_sample(T, numex, initial_prob, transmat, mu, Sigma, mixmat)
+% SAMPLE_MHMM Generate random sequences from an HMM with (mixtures of) Gaussian output.
+% [obs, hidden] = sample_mhmm(T, numex, initial_prob, transmat, mu, Sigma, mixmat)
+%
+% INPUTS:
+% T - length of each sequence
+% numex - num. sequences
+% init_state_prob(i) = Pr(Q(1) = i)
+% transmat(i,j) = Pr(Q(t+1)=j | Q(t)=i)
+% mu(:,j,k) = mean of Y(t) given Q(t)=j, M(t)=k
+% Sigma(:,:,j,k) = cov. of Y(t) given Q(t)=j, M(t)=k
+% mixmat(j,k) = Pr(M(t)=k | Q(t)=j) : set to ones(Q,1) or omit if single mixture
+%
+% OUTPUT:
+% obs(:,t,l) = observation vector at time t for sequence l
+% hidden(t,l) = the hidden state at time t for sequence l
+
+Q = length(initial_prob);
+if nargin < 7, mixmat = ones(Q,1); end
+O = size(mu,1);
+hidden = zeros(T, numex);
+obs = zeros(O, T, numex);
+
+hidden = mc_sample(initial_prob, transmat, T, numex)';
+for i=1:numex
+ for t=1:T
+ q = hidden(t,i);
+ m = sample_discrete(mixmat(q,:), 1, 1);
+ obs(:,t,i) = gaussian_sample(mu(:,q,m), Sigma(:,:,q,m), 1);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mk_leftright_transmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mk_leftright_transmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function transmat = mk_leftright_transmat(Q, p)
+% MK_LEFTRIGHT_TRANSMAT Q = num states, p = prob on (i,i), 1-p on (i,i+1)
+% function transmat = mk_leftright_transmat(Q, p)
+
+transmat = p*diag(ones(Q,1)) + (1-p)*diag(ones(Q-1,1),1);
+transmat(Q,Q)=1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/mk_rightleft_transmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/mk_rightleft_transmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function transmat = mk_rightleft_transmat(Q, p)
+% MK_RIGHTLEFT_TRANSMAT Q = num states, p = prob on (i,i), 1-p on (i,i-1)
+% function transmat = mk_rightleft_transmat(Q, p)
+
+transmat = p*diag(ones(Q,1)) + (1-p)*diag(ones(Q-1,1),-1);
+transmat(1,1)=1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/pomdp_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/pomdp_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function [obs, hidden] = pomdp_sample(initial_prob, transmat, obsmat, act)
+% SAMPLE_POMDP Generate a random sequence from a Partially Observed Markov Decision Process.
+% [obs, hidden] = sample_pomdp(prior, transmat, obsmat, act)
+%
+% Inputs:
+% prior(i) = Pr(Q(1)=i)
+% transmat{a}(i,j) = Pr(Q(t)=j | Q(t-1)=i, A(t)=a)
+% obsmat(i,k) = Pr(Y(t)=k | Q(t)=i)
+% act(a) = A(t), so act(1) is ignored
+%
+% Output:
+% obs and hidden are vectors of length T=length(act)
+
+
+len = length(act);
+hidden = mdp_sample(initial_prob, transmat, act);
+obs = zeros(1, len);
+for t=1:len
+ obs(t) = sample_discrete(obsmat(hidden(t),:));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/testHMM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/testHMM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+% Run all the demos, to check everything is "syntactically correct"
+mhmm_em_demo
+dhmm_em_demo
+dhmm_em_online_demo
+fixed_lag_smoother_demo
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/transmat_train_observed.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/transmat_train_observed.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function [transmat, initState] = transmat_train_observed(labels, nstates, varargin)
+% transmat_train_observed ML estimation from fully observed data
+% function [transmat, initState] = transmat_train_observed(labels, nstates, varargin)
+%
+% If all sequences have the same length
+% labels(ex,t)
+% If sequences have different lengths, we use cell arrays
+% labels{ex}(t)
+
+[dirichletPriorWeight, mkSymmetric, other] = process_options(...
+ varargin, 'dirichletPriorWeight', 0, 'mkSymmetric', 0);
+
+if ~iscell(labels)
+ [numex T] = size(labels);
+ if T==1
+ labels = labels';
+ end
+ %fprintf('T=%d, numex=%d\n', T, numex);
+ labels = num2cell(labels,2); % each row gets its own cell
+end
+numex = length(labels);
+
+counts = zeros(nstates, nstates);
+counts1 = zeros(nstates,1);
+for s=1:numex
+ labs = labels{s}; labs = labs(:)';
+ dat = [labs(1:end-1); labs(2:end)];
+ counts = counts + compute_counts(dat, [nstates nstates]);
+ q = labs(1);
+ counts1(q) = counts1(q) + 1;
+end
+pseudo_counts = dirichletPriorWeight*ones(nstates, nstates);
+if mkSymmetric
+ counts = counts + counts';
+end
+transmat = mk_stochastic(counts + pseudo_counts);
+initState = normalize(counts1 + dirichletPriorWeight*ones(nstates,1));
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/HMM/viterbi_path.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/HMM/viterbi_path.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function path = viterbi_path(prior, transmat, obslik)
+% VITERBI Find the most-probable (Viterbi) path through the HMM state trellis.
+% path = viterbi(prior, transmat, obslik)
+%
+% Inputs:
+% prior(i) = Pr(Q(1) = i)
+% transmat(i,j) = Pr(Q(t+1)=j | Q(t)=i)
+% obslik(i,t) = Pr(y(t) | Q(t)=i)
+%
+% Outputs:
+% path(t) = q(t), where q1 ... qT is the argmax of the above expression.
+
+
+% delta(j,t) = prob. of the best sequence of length t-1 and then going to state j, and O(1:t)
+% psi(j,t) = the best predecessor state, given that we ended up in state j at t
+
+scaled = 1;
+
+T = size(obslik, 2);
+prior = prior(:);
+Q = length(prior);
+
+delta = zeros(Q,T);
+psi = zeros(Q,T);
+path = zeros(1,T);
+scale = ones(1,T);
+
+
+t=1;
+delta(:,t) = prior .* obslik(:,t);
+if scaled
+ [delta(:,t), n] = normalise(delta(:,t));
+ scale(t) = 1/n;
+end
+psi(:,t) = 0; % arbitrary value, since there is no predecessor to t=1
+for t=2:T
+ for j=1:Q
+ [delta(j,t), psi(j,t)] = max(delta(:,t-1) .* transmat(:,j));
+ delta(j,t) = delta(j,t) * obslik(j,t);
+ end
+ if scaled
+ [delta(:,t), n] = normalise(delta(:,t));
+ scale(t) = 1/n;
+ end
+end
+[p, path(T)] = max(delta(:,T));
+for t=T-1:-1:1
+ path(t) = psi(path(t+1),t+1);
+end
+
+% If scaled==0, p = prob_path(best_path)
+% If scaled==1, p = Pr(replace sum with max and proceed as in the scaled forwards algo)
+% Both are different from p(data) as computed using the sum-product (forwards) algorithm
+
+if 0
+if scaled
+ loglik = -sum(log(scale));
+ %loglik = prob_path(prior, transmat, obslik, path);
+else
+ loglik = log(p);
+end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+/KLgauss.m/1.1.1.1/Tue Apr 26 02:29:16 2005//
+/README.txt/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/beta_sample.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/chisquared_histo.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/chisquared_prob.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/chisquared_readme.txt/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/chisquared_table.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/clg_Mstep.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/clg_Mstep_simple.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/clg_prob.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/condGaussToJoint.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cond_indep_fisher_z.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/condgaussTrainObserved.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/condgauss_sample.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/convertBinaryLabels.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cwr_demo.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cwr_em.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cwr_predict.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cwr_prob.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cwr_readme.txt/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/cwr_test.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/dirichlet_sample.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/dirichletpdf.m/1.1.1.1/Sun May 22 23:32:18 2005//
+/dirichletrnd.m/1.1.1.1/Sun May 22 23:32:12 2005//
+/distchck.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/eigdec.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/est_transmat.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/fit_paritioned_model_testfn.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/fit_partitioned_model.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/gamma_sample.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/gaussian_prob.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/gaussian_sample.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/histCmpChi2.m/1.1.1.1/Tue May 3 20:18:16 2005//
+/linear_regression.m/1.1.1.1/Tue Apr 26 02:29:18 2005//
+/logist2.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/logist2Apply.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/logist2ApplyRegularized.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/logist2Fit.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/logist2FitRegularized.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/logistK.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/logistK_eval.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/marginalize_gaussian.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/matrix_T_pdf.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/matrix_normal_pdf.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mc_stat_distrib.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_Mstep.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_classifier_apply.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_classifier_train.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_em.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_init.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_prob.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_prob_test.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mixgauss_sample.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mkPolyFvec.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/mk_unit_norm.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/multinomial_prob.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/multinomial_sample.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/multipdf.m/1.1.1.1/Sun May 22 23:32:42 2005//
+/multirnd.m/1.1.1.1/Sun May 22 23:32:38 2005//
+/normal_coef.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/partial_corr_coef.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/parzen.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/parzenC.c/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/parzenC.dll/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/parzenC.mexglx/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/parzenC_test.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/parzen_fit_select_unif.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/pca.m/1.1.1.1/Tue Apr 26 02:29:20 2005//
+/rndcheck.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/sample.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/sample_discrete.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/sample_gaussian.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/standardize.m/1.1.1.1/Wed May 4 04:35:36 2005//
+/student_t_logprob.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/student_t_prob.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/test_dir.m/1.1.1.1/Sun May 22 23:32:20 2005//
+/unidrndKPM.m/1.1.1.1/Tue May 31 18:19:24 2005//
+/unif_discrete_sample.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+/weightedRegression.m/1.1.1.1/Tue Apr 26 02:29:22 2005//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/KPMstats
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/KLgauss.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/KLgauss.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function kl = KLgauss(P, Q)
+%The following computes D(P||Q), the KL divergence between two zero-mean
+%Gaussians with covariance P and Q:
+% klDiv = -0.5*(log(det(P*inv(Q))) + trace(eye(N)-P*inv(Q)));
+
+R = P*inv(Q);
+kl = -0.5*(log(det(R))) + trace(eye(length(P))-R);
+
+%To get MI, just set P=cov(X,Y) and Q=blockdiag(cov(X),cov(Y)).
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+KPMstats is a directory of miscellaneous statistics functions written by
+Kevin Patrick Murphy and various other people (see individual file headers).
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/beta_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/beta_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+function r = betarnd(a,b,m,n);
+%BETARND Random matrices from beta distribution.
+% R = BETARND(A,B) returns a matrix of random numbers chosen
+% from the beta distribution with parameters A and B.
+% The size of R is the common size of A and B if both are matrices.
+% If either parameter is a scalar, the size of R is the size of the other
+% parameter. Alternatively, R = BETARND(A,B,M,N) returns an M by N matrix.
+
+% Reference:
+% [1] L. Devroye, "Non-Uniform Random Variate Generation",
+% Springer-Verlag, 1986
+
+% Copyright (c) 1993-98 by The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:29:18 $
+
+if nargin < 2,
+ error('Requires at least two input arguments');
+end
+
+if nargin == 2
+ [errorcode rows columns] = rndcheck(2,2,a,b);
+end
+
+if nargin == 3
+ [errorcode rows columns] = rndcheck(3,2,a,b,m);
+end
+
+if nargin == 4
+ [errorcode rows columns] = rndcheck(4,2,a,b,m,n);
+end
+
+if errorcode > 0
+ error('Size information is inconsistent.');
+end
+
+r = zeros(rows,columns);
+
+% Use Theorem 4.1, case A (Devroye, page 430) to derive beta
+% random numbers as a ratio of gamma random numbers.
+if prod(size(a)) == 1
+ a1 = a(ones(rows,1),ones(columns,1));
+ g1 = gamrnd(a1,1);
+else
+ g1 = gamrnd(a,1);
+end
+if prod(size(b)) == 1
+ b1 = b(ones(rows,1),ones(columns,1));
+ g2 = gamrnd(b1,1);
+else
+ g2 = gamrnd(b,1);
+end
+r = g1 ./ (g1 + g2);
+
+% Return NaN if b is not positive.
+if any(any(b <= 0));
+ if prod(size(b) == 1)
+ tmp = NaN;
+ r = tmp(ones(rows,columns));
+ else
+ k = find(b <= 0);
+ tmp = NaN;
+ r(k) = tmp(ones(size(k)));
+ end
+end
+
+% Return NaN if a is not positive.
+if any(any(a <= 0));
+ if prod(size(a) == 1)
+ tmp = NaN;
+ r = tmp(ones(rows,columns));
+ else
+ k = find(a <= 0);
+ tmp = NaN;
+ r(k) = tmp(ones(size(k)));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/chisquared_histo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/chisquared_histo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function s = chisquared_histo(h1, h2)
+% Measure distance between 2 histograms (small numbers means more similar)
+denom = h1 + h2;
+denom = denom + (denom==0);
+s = sum(((h1 - h2) .^ 2) ./ denom);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/chisquared_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/chisquared_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function P = chisquared_prob(X2,v)
+%CHISQUARED_PROB computes the chi-squared probability function.
+% P = CHISQUARED_PROB( X2, v ) returns P(X2|v), the probability
+% of observing a chi-squared value <= X2 with v degrees of freedom.
+% This is the probability that the sum of squares of v unit-variance
+% normally-distributed random variables is <= X2.
+% X2 and v may be matrices of the same size, or either
+% may be a scalar.
+%
+% e.g., CHISQUARED_PROB(5.99,2) returns 0.9500, verifying the
+% 95% confidence bound for 2 degrees of freedom. This is also
+% cross-checked in, e.g., Abramowitz & Stegun Table 26.8
+%
+% See also CHISQUARED_TABLE
+%
+%Peter R. Shaw, WHOI
+
+% References: Press et al., Numerical Recipes, Cambridge, 1986;
+% Abramowitz & Stegun, Handbook of Mathematical Functions, Dover, 1972.
+
+% Peter R. Shaw, Woods Hole Oceanographic Institution
+% Woods Hole, MA 02543
+% (508) 457-2000 ext. 2473 pshaw@whoi.edu
+% March, 1990; fixed Oct 1992 for version 4
+
+% Computed using the Incomplete Gamma function,
+% as given by Press et al. (Recipes) eq. (6.2.17)
+
+% Following nonsense is necessary from Matlab version 3 -> version 4
+versn_str=version; eval(['versn=' versn_str(1) ';']);
+if versn<=3, %sigh
+ P = gamma(v/2, X2/2);
+else
+ P = gammainc(X2/2, v/2);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/chisquared_readme.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/chisquared_readme.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+From ftp://ftp.mathworks.com/pub/contrib/v4/stats/chisquared/
+
+Chi-squared probability function.
+
+Abstract
+m-files to compute the Chi-squared probability function, and
+the percentage points of the probability function.
+
+P = CHISQUARED_PROB( X2, v ) returns P(X2|v), the probability
+of observing a chi-squared value <= X2 with v degrees of freedom.
+This is the probability that the sum of squares of v unit-variance
+normally-distributed random variables is <= X2.
+
+Conversely:
+X2 = CHISQUARED_TABLE( P, v ) returns the X2, the value of
+chi-squared corresponding to v degrees of freedom and probability P.
+
+In reference textbooks, what is normally tabulated are the
+percentage points of the chi-squared distribution; thus, one
+would use CHISQUARED_TABLE rather than interpolate such a table.
+
+References: Press et al., Numerical Recipes, Cambridge, 1986;
+Abramowitz & Stegun, Handbook of Mathematical Functions,
+Dover, 1972; Table 26.8
+
+Peter R. Shaw
+Woods Hole Oceanographic Institution, Woods Hole, MA 02543
+(508) 457-2000 ext. 2473
+pshaw@whoi.edu
+
+--FILES GENERATED--
+README_chisq
+chisquared_prob.m (function) -- chi-squared probability function
+chisquared_table.m (function) -- "percentage points" (i.e., inverse)
+ of chi-squared probability function
+chiaux.m (function) -- auxiliary fn. used by chisquared_table
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/chisquared_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/chisquared_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,63 @@
+function X2 = chisquared_table(P,v)
+%CHISQUARED_TABLE computes the "percentage points" of the
+%chi-squared distribution, as in Abramowitz & Stegun Table 26.8
+% X2 = CHISQUARED_TABLE( P, v ) returns the value of chi-squared
+% corresponding to v degrees of freedom and probability P.
+% P is the probability that the sum of squares of v unit-variance
+% normally-distributed random variables is <= X2.
+% P and v may be matrices of the same size, or either
+% may be a scalar.
+%
+% e.g., to find the 95% confidence interval for 2 degrees
+% of freedom, use CHISQUARED_TABLE( .95, 2 ), yielding 5.99,
+% in agreement with Abramowitz & Stegun's Table 26.8
+%
+% This result can be checked through the function
+% CHISQUARED_PROB( 5.99, 2 ), yielding 0.9500
+%
+% The familiar 1.96-sigma confidence bounds enclosing 95% of
+% a 1-D gaussian is found through
+% sqrt( CHISQUARED_TABLE( .95, 1 )), yielding 1.96
+%
+% See also CHISQUARED_PROB
+%
+%Peter R. Shaw, WHOI
+%Leslie Rosenfeld, MBARI
+
+% References: Press et al., Numerical Recipes, Cambridge, 1986;
+% Abramowitz & Stegun, Handbook of Mathematical Functions, Dover, 1972.
+
+% Peter R. Shaw, Woods Hole Oceanographic Institution
+% Woods Hole, MA 02543 pshaw@whoi.edu
+% Leslie Rosenfeld, MBARI
+% Last revision: Peter Shaw, Oct 1992: fsolve with version 4
+
+% ** Calls function CHIAUX **
+% Computed using the Incomplete Gamma function,
+% as given by Press et al. (Recipes) eq. (6.2.17)
+
+[mP,nP]=size(P);
+[mv,nv]=size(v);
+if mP~=mv | nP~=nv,
+ if mP==1 & nP==1,
+ P=P*ones(mv,nv);
+ elseif mv==1 & nv==1,
+ v=v*ones(mP,nP);
+ else
+ error('P and v must be the same size')
+ end
+end
+[m,n]=size(P); X2 = zeros(m,n);
+for i=1:m,
+ for j=1:n,
+ if v(i,j)<=10,
+ x0=P(i,j)*v(i,j);
+ else
+ x0=v(i,j);
+ end
+% Note: "old" and "new" calls to fsolve may or may not follow
+% Matlab version 3.5 -> version 4 (so I'm keeping the old call around...)
+% X2(i,j) = fsolve('chiaux',x0,zeros(16,1),[v(i,j),P(i,j)]); %(old call)
+ X2(i,j) = fsolve('chiaux',x0,zeros(16,1),[],[v(i,j),P(i,j)]);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/clg_Mstep.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/clg_Mstep.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,203 @@
+function [mu, Sigma, B] = clg_Mstep(w, Y, YY, YTY, X, XX, XY, varargin)
+% MSTEP_CLG Compute ML/MAP estimates for a conditional linear Gaussian
+% [mu, Sigma, B] = Mstep_clg(w, Y, YY, YTY, X, XX, XY, varargin)
+%
+% We fit P(Y|X,Q=i) = N(Y; B_i X + mu_i, Sigma_i)
+% and w(i,t) = p(M(t)=i|y(t)) = posterior responsibility
+% See www.ai.mit.edu/~murphyk/Papers/learncg.pdf.
+%
+% See process_options for how to specify the input arguments.
+%
+% INPUTS:
+% w(i) = sum_t w(i,t) = responsibilities for each mixture component
+% If there is only one mixture component (i.e., Q does not exist),
+% then w(i) = N = nsamples, and
+% all references to i can be replaced by 1.
+% Y(:,i) = sum_t w(i,t) y(:,t) = weighted observations
+% YY(:,:,i) = sum_t w(i,t) y(:,t) y(:,t)' = weighted outer product
+% YTY(i) = sum_t w(i,t) y(:,t)' y(:,t) = weighted inner product
+% You only need to pass in YTY if Sigma is to be estimated as spherical.
+%
+% In the regression context, we must also pass in the following
+% X(:,i) = sum_t w(i,t) x(:,t) = weighted inputs
+% XX(:,:,i) = sum_t w(i,t) x(:,t) x(:,t)' = weighted outer product
+% XY(i) = sum_t w(i,t) x(:,t) y(:,t)' = weighted outer product
+%
+% Optional inputs (default values in [])
+%
+% 'cov_type' - 'full', 'diag' or 'spherical' ['full']
+% 'tied_cov' - 1 (Sigma) or 0 (Sigma_i) [0]
+% 'clamped_cov' - pass in clamped value, or [] if unclamped [ [] ]
+% 'clamped_mean' - pass in clamped value, or [] if unclamped [ [] ]
+% 'clamped_weights' - pass in clamped value, or [] if unclamped [ [] ]
+% 'cov_prior' - added to Sigma(:,:,i) to ensure psd [0.01*eye(d,d,Q)]
+%
+% If cov is tied, Sigma has size d*d.
+% But diagonal and spherical covariances are represented in full size.
+
+[cov_type, tied_cov, ...
+ clamped_cov, clamped_mean, clamped_weights, cov_prior, ...
+ xs, ys, post] = ...
+ process_options(varargin, ...
+ 'cov_type', 'full', 'tied_cov', 0, 'clamped_cov', [], 'clamped_mean', [], ...
+ 'clamped_weights', [], 'cov_prior', [], ...
+ 'xs', [], 'ys', [], 'post', []);
+
+[Ysz Q] = size(Y);
+
+if isempty(X) % no regression
+ %B = [];
+ B2 = zeros(Ysz, 1, Q);
+ for i=1:Q
+ B(:,:,i) = B2(:,1:0,i); % make an empty array of size Ysz x 0 x Q
+ end
+ [mu, Sigma] = mixgauss_Mstep(w, Y, YY, YTY, varargin{:});
+ return;
+end
+
+
+N = sum(w);
+if isempty(cov_prior)
+ cov_prior = 0.01*repmat(eye(Ysz,Ysz), [1 1 Q]);
+end
+%YY = YY + cov_prior; % regularize the scatter matrix
+
+% Set any zero weights to one before dividing
+% This is valid because w(i)=0 => Y(:,i)=0, etc
+w = w + (w==0);
+
+Xsz = size(X,1);
+% Append 1 to X to get Z
+ZZ = zeros(Xsz+1, Xsz+1, Q);
+ZY = zeros(Xsz+1, Ysz, Q);
+for i=1:Q
+ ZZ(:,:,i) = [XX(:,:,i) X(:,i);
+ X(:,i)' w(i)];
+ ZY(:,:,i) = [XY(:,:,i);
+ Y(:,i)'];
+end
+
+
+%%% Estimate mean and regression
+
+if ~isempty(clamped_weights) & ~isempty(clamped_mean)
+ B = clamped_weights;
+ mu = clamped_mean;
+end
+if ~isempty(clamped_weights) & isempty(clamped_mean)
+ B = clamped_weights;
+ % eqn 5
+ mu = zeros(Ysz, Q);
+ for i=1:Q
+ mu(:,i) = (Y(:,i) - B(:,:,i)*X(:,i)) / w(i);
+ end
+end
+if isempty(clamped_weights) & ~isempty(clamped_mean)
+ mu = clamped_mean;
+ % eqn 3
+ B = zeros(Ysz, Xsz, Q);
+ for i=1:Q
+ tmp = XY(:,:,i)' - mu(:,i)*X(:,i)';
+ %B(:,:,i) = tmp * inv(XX(:,:,i));
+ B(:,:,i) = (XX(:,:,i) \ tmp')';
+ end
+end
+if isempty(clamped_weights) & isempty(clamped_mean)
+ mu = zeros(Ysz, Q);
+ B = zeros(Ysz, Xsz, Q);
+ % Nothing is clamped, so we must estimate B and mu jointly
+ for i=1:Q
+ % eqn 9
+ if rcond(ZZ(:,:,i)) < 1e-10
+ sprintf('clg_Mstep warning: ZZ(:,:,%d) is ill-conditioned', i);
+ % probably because there are too few cases for a high-dimensional input
+ ZZ(:,:,i) = ZZ(:,:,i) + 1e-5*eye(Xsz+1);
+ end
+ %A = ZY(:,:,i)' * inv(ZZ(:,:,i));
+ A = (ZZ(:,:,i) \ ZY(:,:,i))';
+ B(:,:,i) = A(:, 1:Xsz);
+ mu(:,i) = A(:, Xsz+1);
+ end
+end
+
+if ~isempty(clamped_cov)
+ Sigma = clamped_cov;
+ return;
+end
+
+
+%%% Estimate covariance
+
+% Spherical
+if cov_type(1)=='s'
+ if ~tied_cov
+ Sigma = zeros(Ysz, Ysz, Q);
+ for i=1:Q
+ % eqn 16
+ A = [B(:,:,i) mu(:,i)];
+ %s = trace(YTY(i) + A'*A*ZZ(:,:,i) - 2*A*ZY(:,:,i)) / (Ysz*w(i)); % wrong!
+ s = (YTY(i) + trace(A'*A*ZZ(:,:,i)) - trace(2*A*ZY(:,:,i))) / (Ysz*w(i));
+ Sigma(:,:,i) = s*eye(Ysz,Ysz);
+
+ %%%%%%%%%%%%%%%%%%% debug
+ if ~isempty(xs)
+ [nx T] = size(xs);
+ zs = [xs; ones(1,T)];
+ yty = 0;
+ zAAz = 0;
+ yAz = 0;
+ for t=1:T
+ yty = yty + ys(:,t)'*ys(:,t) * post(i,t);
+ zAAz = zAAz + zs(:,t)'*A'*A*zs(:,t)*post(i,t);
+ yAz = yAz + ys(:,t)'*A*zs(:,t)*post(i,t);
+ end
+ assert(approxeq(yty, YTY(i)))
+ assert(approxeq(zAAz, trace(A'*A*ZZ(:,:,i))))
+ assert(approxeq(yAz, trace(A*ZY(:,:,i))))
+ s2 = (yty + zAAz - 2*yAz) / (Ysz*w(i));
+ assert(approxeq(s,s2))
+ end
+ %%%%%%%%%%%%%%% end debug
+
+ end
+ else
+ S = 0;
+ for i=1:Q
+ % eqn 18
+ A = [B(:,:,i) mu(:,i)];
+ S = S + trace(YTY(i) + A'*A*ZZ(:,:,i) - 2*A*ZY(:,:,i));
+ end
+ Sigma = repmat(S / (N*Ysz), [1 1 Q]);
+ end
+else % Full/diagonal
+ if ~tied_cov
+ Sigma = zeros(Ysz, Ysz, Q);
+ for i=1:Q
+ A = [B(:,:,i) mu(:,i)];
+ % eqn 10
+ SS = (YY(:,:,i) - ZY(:,:,i)'*A' - A*ZY(:,:,i) + A*ZZ(:,:,i)*A') / w(i);
+ if cov_type(1)=='d'
+ Sigma(:,:,i) = diag(diag(SS));
+ else
+ Sigma(:,:,i) = SS;
+ end
+ end
+ else % tied
+ SS = zeros(Ysz, Ysz);
+ for i=1:Q
+ A = [B(:,:,i) mu(:,i)];
+ % eqn 13
+ SS = SS + (YY(:,:,i) - ZY(:,:,i)'*A' - A*ZY(:,:,i) + A*ZZ(:,:,i)*A');
+ end
+ SS = SS / N;
+ if cov_type(1)=='d'
+ Sigma = diag(diag(SS));
+ else
+ Sigma = SS;
+ end
+ Sigma = repmat(Sigma, [1 1 Q]);
+ end
+end
+
+Sigma = Sigma + cov_prior;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/clg_Mstep_simple.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/clg_Mstep_simple.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [mu, B] = clg_Mstep_simple(w, Y, YY, YTY, X, XX, XY)
+% CLG_MSTEP_SIMPLE Same as CLG_MSTEP, but doesn't estimate Sigma, so is slightly faster
+% function [mu, B] = clg_Mstep_simple(w, Y, YY, YTY, X, XX, XY)
+%
+% See clg_Mstep for details.
+% Unlike clg_Mstep, there are no optional arguments, which are slow to process
+% if this function is inside a tight loop.
+
+[Ysz Q] = size(Y);
+
+if isempty(X) % no regression
+ %B = [];
+ B2 = zeros(Ysz, 1, Q);
+ for i=1:Q
+ B(:,:,i) = B2(:,1:0,i); % make an empty array of size Ysz x 0 x Q
+ end
+ [mu, Sigma] = mixgauss_Mstep(w, Y, YY, YTY);
+ return;
+end
+
+N = sum(w);
+%YY = YY + cov_prior; % regularize the scatter matrix
+
+% Set any zero weights to one before dividing
+% This is valid because w(i)=0 => Y(:,i)=0, etc
+w = w + (w==0);
+
+Xsz = size(X,1);
+% Append 1 to X to get Z
+ZZ = zeros(Xsz+1, Xsz+1, Q);
+ZY = zeros(Xsz+1, Ysz, Q);
+for i=1:Q
+ ZZ(:,:,i) = [XX(:,:,i) X(:,i);
+ X(:,i)' w(i)];
+ ZY(:,:,i) = [XY(:,:,i);
+ Y(:,i)'];
+end
+
+mu = zeros(Ysz, Q);
+B = zeros(Ysz, Xsz, Q);
+for i=1:Q
+ % eqn 9
+ if rcond(ZZ(:,:,i)) < 1e-10
+ sprintf('clg_Mstep warning: ZZ(:,:,%d) is ill-conditioned', i);
+ %probably because there are too few cases for a high-dimensional input
+ ZZ(:,:,i) = ZZ(:,:,i) + 1e-5*eye(Xsz+1);
+ end
+ %A = ZY(:,:,i)' * inv(ZZ(:,:,i));
+ A = (ZZ(:,:,i) \ ZY(:,:,i))';
+ B(:,:,i) = A(:, 1:Xsz);
+ mu(:,i) = A(:, Xsz+1);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/clg_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/clg_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function p = eval_pdf_clg(X,Y,mu,Sigma,W)
+% function p = eval_pdf_clg(X,Y,mu,Sigma,W)
+%
+% p(c,t) = N(Y(:,t); mu(:,c) + W(:,:,c)*X(:,t), Sigma(:,:,c))
+
+[d T] = size(Y);
+[d nc] = size(mu);
+p = zeros(nc,T);
+for c=1:nc
+ denom = (2*pi)^(d/2)*sqrt(abs(det(Sigma(:,:,c))));
+ M = repmat(mu(:,c), 1, T) + W(:,:,c)*X;
+ mahal = sum(((Y-M)'*inv(Sigma(:,:,c))).*(Y-M)',2);
+ p(c,:) = (exp(-0.5*mahal) / denom)';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/condGaussToJoint.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/condGaussToJoint.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [muXY, SigmaXY] = condGaussToJoint(muX, SigmaX, muY, SigmaY, WYgivenX)
+
+% Compute P(X,Y) from P(X) * P(Y|X) where P(X)=N(X;muX,SigmaX)
+% and P(Y|X) = N(Y; WX + muY, SigmaY)
+
+% For details on how to compute a Gaussian from a Bayes net
+% - "Gaussian Influence Diagrams", R. Shachter and C. R. Kenley, Management Science, 35(5):527--550, 1989.
+
+% size(W) = dy x dx
+dx = length(muX);
+dy = length(muY);
+muXY = [muX(:); WYgivenX*muX(:) + muY];
+
+W = [zeros(dx,dx) WYgivenX';
+ zeros(dy,dx) zeros(dy,dy)];
+D = [SigmaX zeros(dx,dy);
+ zeros(dy,dx) SigmaY];
+
+U = inv(eye(size(W)) - W')';
+SigmaXY = U' * D * U;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cond_indep_fisher_z.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cond_indep_fisher_z.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,142 @@
+function [CI, r, p] = cond_indep_fisher_z(X, Y, S, C, N, alpha)
+% COND_INDEP_FISHER_Z Test if X indep Y given Z using Fisher's Z test
+% CI = cond_indep_fisher_z(X, Y, S, C, N, alpha)
+%
+% C is the covariance (or correlation) matrix
+% N is the sample size
+% alpha is the significance level (default: 0.05)
+%
+% See p133 of T. Anderson, "An Intro. to Multivariate Statistical Analysis", 1984
+
+if nargin < 6, alpha = 0.05; end
+
+r = partial_corr_coef(C, X, Y, S);
+z = 0.5*log( (1+r)/(1-r) );
+z0 = 0;
+W = sqrt(N - length(S) - 3)*(z-z0); % W ~ N(0,1)
+cutoff = norminv(1 - 0.5*alpha); % P(|W| <= cutoff) = 0.95
+%cutoff = mynorminv(1 - 0.5*alpha); % P(|W| <= cutoff) = 0.95
+if abs(W) < cutoff
+ CI = 1;
+else % reject the null hypothesis that rho = 0
+ CI = 0;
+end
+p = normcdf(W);
+%p = mynormcdf(W);
+
+%%%%%%%%%
+
+function p = normcdf(x,mu,sigma)
+%NORMCDF Normal cumulative distribution function (cdf).
+% P = NORMCDF(X,MU,SIGMA) computes the normal cdf with mean MU and
+% standard deviation SIGMA at the values in X.
+%
+% The size of P is the common size of X, MU and SIGMA. A scalar input
+% functions as a constant matrix of the same size as the other inputs.
+%
+% Default values for MU and SIGMA are 0 and 1 respectively.
+
+% References:
+% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical
+% Functions", Government Printing Office, 1964, 26.2.
+
+% Copyright (c) 1993-98 by The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:29:18 $
+
+if nargin < 3,
+ sigma = 1;
+end
+
+if nargin < 2;
+ mu = 0;
+end
+
+[errorcode x mu sigma] = distchck(3,x,mu,sigma);
+
+if errorcode > 0
+ error('Requires non-scalar arguments to match in size.');
+end
+
+% Initialize P to zero.
+p = zeros(size(x));
+
+% Return NaN if SIGMA is not positive.
+k1 = find(sigma <= 0);
+if any(k1)
+ tmp = NaN;
+ p(k1) = tmp(ones(size(k1)));
+end
+
+% Express normal CDF in terms of the error function.
+k = find(sigma > 0);
+if any(k)
+ p(k) = 0.5 * erfc( - (x(k) - mu(k)) ./ (sigma(k) * sqrt(2)));
+end
+
+% Make sure that round-off errors never make P greater than 1.
+k2 = find(p > 1);
+if any(k2)
+ p(k2) = ones(size(k2));
+end
+
+%%%%%%%%
+
+function x = norminv(p,mu,sigma);
+%NORMINV Inverse of the normal cumulative distribution function (cdf).
+% X = NORMINV(P,MU,SIGMA) finds the inverse of the normal cdf with
+% mean, MU, and standard deviation, SIGMA.
+%
+% The size of X is the common size of the input arguments. A scalar input
+% functions as a constant matrix of the same size as the other inputs.
+%
+% Default values for MU and SIGMA are 0 and 1 respectively.
+
+% References:
+% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical
+% Functions", Government Printing Office, 1964, 7.1.1 and 26.2.2
+
+% Copyright (c) 1993-98 by The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:29:18 $
+
+if nargin < 3,
+ sigma = 1;
+end
+
+if nargin < 2;
+ mu = 0;
+end
+
+[errorcode p mu sigma] = distchck(3,p,mu,sigma);
+
+if errorcode > 0
+ error('Requires non-scalar arguments to match in size.');
+end
+
+% Allocate space for x.
+x = zeros(size(p));
+
+% Return NaN if the arguments are outside their respective limits.
+k = find(sigma <= 0 | p < 0 | p > 1);
+if any(k)
+ tmp = NaN;
+ x(k) = tmp(ones(size(k)));
+end
+
+% Put in the correct values when P is either 0 or 1.
+k = find(p == 0);
+if any(k)
+ tmp = Inf;
+ x(k) = -tmp(ones(size(k)));
+end
+
+k = find(p == 1);
+if any(k)
+ tmp = Inf;
+ x(k) = tmp(ones(size(k)));
+end
+
+% Compute the inverse function for the intermediate values.
+k = find(p > 0 & p < 1 & sigma > 0);
+if any(k),
+ x(k) = sqrt(2) * sigma(k) .* erfinv(2 * p(k) - 1) + mu(k);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/condgaussTrainObserved.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/condgaussTrainObserved.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function [mu, Sigma] = mixgaussTrainObserved(obsData, hiddenData, nstates, varargin);
+% mixgaussTrainObserved Max likelihood estimates of conditional Gaussian from raw data
+% function [mu, Sigma] = mixgaussTrainObserved(obsData, hiddenData, nstates, ...);
+%
+% Input:
+% obsData(:,i)
+% hiddenData(i) - this is the mixture component label for example i
+% Optional arguments - same as mixgauss_Mstep
+%
+% Output:
+% mu(:,q)
+% Sigma(:,:,q) - same as mixgauss_Mstep
+%
+% NOTE(review): the file is committed as condgaussTrainObserved.m but the
+% function line says mixgaussTrainObserved; MATLAB dispatches on the file
+% name, so callers reach this as condgaussTrainObserved.
+
+% Accumulate the sufficient statistics mixgauss_Mstep expects, treating
+% each example as belonging to its labelled component with probability 1.
+[D numex] = size(obsData);
+Y = zeros(D, nstates); % per-state sum of data vectors
+YY = zeros(D,D,nstates); % per-state sum of outer products
+YTY = zeros(nstates,1); % per-state sum of squared norms
+w = zeros(nstates, 1); % per-state effective count
+for q=1:nstates
+ ndx = find(hiddenData==q);
+ w(q) = length(ndx); % each data point has probability 1 of being in this cluster
+ data = obsData(:,ndx);
+ Y(:,q) = sum(data,2);
+ YY(:,:,q) = data*data';
+ YTY(q) = sum(diag(data'*data));
+end
+[mu, Sigma] = mixgauss_Mstep(w, Y, YY, YTY, varargin{:});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/condgauss_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/condgauss_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function x = mixgauss_sample(mu, Sigma, labels)
+% MIXGAUSS_SAMPLE Sample from a mixture of Gaussians given known mixture labels
+% function x = mixgauss_sample(mu, Sigma, labels)
+%
+% mu(:,q), Sigma(:,:,q) parameterize component q; labels(t) in 1..Q
+% selects the component for sample t; x(:,t) is the resulting draw.
+%
+% NOTE(review): committed as condgauss_sample.m, so MATLAB callers reach
+% this function as condgauss_sample.
+
+T = length(labels);
+[D Q] = size(mu);
+x = zeros(D,T);
+% Draw all the samples belonging to each component in one vectorized call.
+for q=1:Q
+ ndx = find(labels==q);
+ x(:,ndx) = gaussian_sample(mu(:,q)', Sigma(:,:,q), length(ndx))';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/convertBinaryLabels.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/convertBinaryLabels.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+% labels01 = (labelsPM+1)/2; % maps -1->0, +1->1
+% labelsPM = (2*labels01)-1; % maps 0,1 -> -1,1
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cwr_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cwr_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,124 @@
+% Compare my code with
+% http://www.media.mit.edu/physics/publications/books/nmm/files/index.html
+%
+% cwm.m
+% (c) Neil Gershenfeld 9/1/97
+% 1D Cluster-Weighted Modeling example
+%
+% This script fits the same 1D step-function data twice -- with cwr_em
+% (KPMstats) and with Gershenfeld's original EM loop -- then asserts the
+% learned parameters and predictions agree.
+clear all
+figure;
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+x = (-10:10)';
+y = (x > 0);
+npts = length(x);
+plot(x,y,'+')
+xlabel('x')
+ylabel('y')
+nclusters = 4;
+nplot = 100;
+xplot = 24*(1:nplot)'/nplot - 12;
+
+% Common random starting point for both implementations.
+mux = 20*rand(1,nclusters) - 10;
+muy = zeros(1,nclusters);
+varx = ones(1,nclusters);
+vary = ones(1,nclusters);
+pc = 1/nclusters * ones(1,nclusters);
+niterations = 5;
+eps = 0.01;
+
+
+I = repmat(eye(1,1), [1 1 nclusters]);
+O = repmat(zeros(1,1), [1 1 nclusters]);
+X = x(:)';
+Y = y(:)';
+
+% KPMstats version: weights clamped to 0 and spherical covariances so the
+% model matches Gershenfeld's cluster-weighted (non-regression) code.
+cwr = cwr_em(X, Y, nclusters, 'muX', mux, 'muY', muy, 'SigmaX', I, ...
+ 'cov_typeX', 'spherical', 'SigmaY', I, 'cov_typeY', 'spherical', ...
+ 'priorC', pc, 'weightsY', O, 'create_init_params', 0, ...
+ 'clamp_weights', 1, 'max_iter', niterations, ...
+ 'cov_priorX', eps*ones(1,1,nclusters), ...
+ 'cov_priorY', eps*ones(1,1,nclusters));
+
+
+% Gershenfeld's EM code
+for step = 1:niterations
+ pplot = exp(-(kron(xplot,ones(1,nclusters)) ...
+ - kron(ones(nplot,1),mux)).^2 ...
+ ./ (2*kron(ones(nplot,1),varx))) ...
+ ./ sqrt(2*pi*kron(ones(nplot,1),varx)) ...
+ .* kron(ones(nplot,1),pc);
+ plot(xplot,pplot,'k');
+ pause(0);
+ % px(t,c)/py(t,c) = Gaussian likelihood of point t under cluster c.
+ px = exp(-(kron(x,ones(1,nclusters)) ...
+ - kron(ones(npts,1),mux)).^2 ...
+ ./ (2*kron(ones(npts,1),varx))) ...
+ ./ sqrt(2*pi*kron(ones(npts,1),varx));
+ py = exp(-(kron(y,ones(1,nclusters)) ...
+ - kron(ones(npts,1),muy)).^2 ...
+ ./ (2*kron(ones(npts,1),vary))) ...
+ ./ sqrt(2*pi*kron(ones(npts,1),vary));
+ p = px .* py .* kron(ones(npts,1),pc);
+ pp = p ./ kron(sum(p,2),ones(1,nclusters)); % posterior over clusters
+ pc = sum(pp)/npts;
+ yfit = sum(kron(ones(npts,1),muy) .* p,2) ...
+ ./ sum(p,2);
+ % M step: posterior-weighted means/variances (eps regularizes variances).
+ mux = sum(kron(x,ones(1,nclusters)) .* pp) ...
+ ./ (npts*pc);
+ varx = eps + sum((kron(x,ones(1,nclusters)) ...
+ - kron(ones(npts,1),mux)).^2 .* pp) ...
+ ./ (npts*pc);
+ muy = sum(kron(y,ones(1,nclusters)) .* pp) ...
+ ./ (npts*pc);
+ vary = eps + sum((kron(y,ones(1,nclusters)) ...
+ - kron(ones(npts,1),muy)).^2 .* pp) ...
+ ./ (npts*pc);
+end
+
+
+% Check equal
+cwr_pc = cwr.priorC';
+assert(approxeq(cwr_pc, pc))
+cwr_mux = cwr.muX;
+assert(approxeq(mux, cwr_mux))
+cwr_SigmaX = squeeze(cwr.SigmaX)';
+assert(approxeq(varx, cwr_SigmaX))
+cwr_muy = cwr.muY;
+assert(approxeq(muy, cwr_muy))
+cwr_SigmaY = squeeze(cwr.SigmaY)';
+assert(approxeq(vary, cwr_SigmaY))
+
+
+% Prediction
+
+X = xplot(:)';
+[cwr_mu, Sigma, post] = cwr_predict(cwr, X);
+cwr_ystd = squeeze(Sigma)';
+
+% pplot(t,c)
+pplot = exp(-(kron(xplot,ones(1,nclusters)) ...
+ - kron(ones(nplot,1),mux)).^2 ...
+ ./ (2*kron(ones(nplot,1),varx))) ...
+ ./ sqrt(2*pi*kron(ones(nplot,1),varx)) ...
+ .* kron(ones(nplot,1),pc);
+yplot = sum(kron(ones(nplot,1),muy) .* pplot,2) ...
+ ./ sum(pplot,2);
+ystdplot = sum(kron(ones(nplot,1),(muy.^2+vary)) .* pplot,2) ...
+ ./ sum(pplot,2) - yplot.^2;
+
+
+% Check equal
+assert(approxeq(yplot(:)', cwr_mu(:)'))
+assert(approxeq(ystdplot, cwr_ystd))
+assert(approxeq(pplot ./ repmat(sum(pplot,2), 1, nclusters),post') )
+
+% Plot the fitted curve, +/- predictive std, and the cluster densities.
+plot(xplot,yplot,'k');
+hold on
+plot(xplot,yplot+ystdplot,'k--');
+plot(xplot,yplot-ystdplot,'k--');
+plot(x,y,'k+');
+axis([-12 12 -1 1.1]);
+plot(xplot,.8*pplot/max(max(pplot))-1,'k')
+hold off
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cwr_em.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cwr_em.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,161 @@
+function cwr = cwr_em(X, Y, nc, varargin)
+% CWR_LEARN Fit the parameters of a cluster weighted regression model using EM
+% function cwr = cwr_learn(X, Y, ...)
+%
+% X(:, t) is the t'th input example
+% Y(:, t) is the t'th output example
+% nc is the number of clusters
+%
+% Returns a struct with fields muX, SigmaX (input mixture), muY, SigmaY,
+% weightsY (per-cluster linear map Y = weightsY*X + muY), and priorC.
+%
+% Kevin Murphy, May 2003
+
+[max_iter, thresh, cov_typeX, cov_typeY, clamp_weights, ...
+ muX, muY, SigmaX, SigmaY, weightsY, priorC, create_init_params, ...
+cov_priorX, cov_priorY, verbose, regress, clamp_covX, clamp_covY] = process_options(...
+ varargin, 'max_iter', 10, 'thresh', 1e-2, 'cov_typeX', 'full', ...
+ 'cov_typeY', 'full', 'clamp_weights', 0, ...
+ 'muX', [], 'muY', [], 'SigmaX', [], 'SigmaY', [], 'weightsY', [], 'priorC', [], ...
+ 'create_init_params', 1, 'cov_priorX', [], 'cov_priorY', [], 'verbose', 0, ...
+ 'regress', 1, 'clamp_covX', 0, 'clamp_covY', 0);
+
+% Sanity-check the data dimensions.
+[nx N] = size(X);
+[ny N2] = size(Y);
+if N ~= N2
+ error(sprintf('nsamples X (%d) ~= nsamples Y (%d)', N, N2));
+end
+%if N < nx
+% fprintf('cwr_em warning: dim X (%d) > nsamples X (%d)\n', nx, N);
+%end
+if (N < nx) & regress
+ fprintf('cwr_em warning: dim X = %d, nsamples X = %d\n', nx, N);
+end
+if (N < ny)
+ fprintf('cwr_em warning: dim Y = %d, nsamples Y = %d\n', ny, N);
+end
+if (nc > N)
+ error(sprintf('cwr_em: more centers (%d) than data', nc))
+end
+
+if nc==1
+ % No latent variable, so there is a closed-form solution
+ w = 1/N; % uniform weight on every sample
+ WYbig = Y*w;
+ WYY = WYbig * Y';
+ WY = sum(WYbig, 2);
+ WYTY = sum(diag(WYbig' * Y));
+ cwr.priorC = 1;
+ cwr.SigmaX = []; % the input density is not modelled when nc==1
+ if ~regress
+ % This is just fitting an unconditional Gaussian
+ cwr.weightsY = [];
+ [cwr.muY, cwr.SigmaY] = ...
+ mixgauss_Mstep(1, WY, WYY, WYTY, ...
+ 'cov_type', cov_typeY, 'cov_prior', cov_priorY);
+ % There is a much easier way...
+ assert(approxeq(cwr.muY, mean(Y')))
+ assert(approxeq(cwr.SigmaY, cov(Y') + 0.01*eye(ny)))
+ else
+ % This is just linear regression
+ WXbig = X*w;
+ WXX = WXbig * X';
+ WX = sum(WXbig, 2);
+ WXTX = sum(diag(WXbig' * X));
+ WXY = WXbig * Y';
+ [cwr.muY, cwr.SigmaY, cwr.weightsY] = ...
+ clg_Mstep(1, WY, WYY, WYTY, WX, WXX, WXY, ...
+ 'cov_type', cov_typeY, 'cov_prior', cov_priorY);
+ end
+ if clamp_covY, cwr.SigmaY = SigmaY; end
+ if clamp_weights, cwr.weightsY = weightsY; end
+ return;
+end
+
+
+if create_init_params
+ [cwr.muX, cwr.SigmaX] = mixgauss_init(nc, X, cov_typeX);
+ [cwr.muY, cwr.SigmaY] = mixgauss_init(nc, Y, cov_typeY);
+ cwr.weightsY = zeros(ny, nx, nc);
+ cwr.priorC = normalize(ones(nc,1));
+else
+ % Caller-supplied starting point.
+ cwr.muX = muX; cwr.muY = muY; cwr.SigmaX = SigmaX; cwr.SigmaY = SigmaY;
+ cwr.weightsY = weightsY; cwr.priorC = priorC;
+end
+
+
+if clamp_covY, cwr.SigmaY = SigmaY; end
+if clamp_covX, cwr.SigmaX = SigmaX; end
+if clamp_weights, cwr.weightsY = weightsY; end
+
+previous_loglik = -inf;
+num_iter = 1;
+converged = 0;
+
+while (num_iter <= max_iter) & ~converged
+
+ % E step
+
+ [likXandY, likYgivenX, post] = cwr_prob(cwr, X, Y);
+ loglik = sum(log(likXandY));
+ % extract expected sufficient statistics
+ w = sum(post,2); % post(c,t); w(c) = expected #points in cluster c
+ WYY = zeros(ny, ny, nc);
+ WY = zeros(ny, nc);
+ WYTY = zeros(nc,1);
+
+ WXX = zeros(nx, nx, nc);
+ WX = zeros(nx, nc);
+ WXTX = zeros(nc, 1);
+ WXY = zeros(nx,ny,nc);
+ %WYY = repmat(reshape(w, [1 1 nc]), [ny ny 1]) .* repmat(Y*Y', [1 1 nc]);
+ for c=1:nc
+ weights = repmat(post(c,:), ny, 1);
+ WYbig = Y .* weights;
+ WYY(:,:,c) = WYbig * Y';
+ WY(:,c) = sum(WYbig, 2);
+ WYTY(c) = sum(diag(WYbig' * Y));
+
+ weights = repmat(post(c,:), nx, 1); % weights(nx, nsamples)
+ WXbig = X .* weights;
+ WXX(:,:,c) = WXbig * X';
+ WX(:,c) = sum(WXbig, 2);
+ WXTX(c) = sum(diag(WXbig' * X));
+ WXY(:,:,c) = WXbig * Y';
+ end
+
+ % M step
+ % Q -> X is called Q->Y in Mstep_clg
+ [cwr.muX, cwr.SigmaX] = mixgauss_Mstep(w, WX, WXX, WXTX, ...
+ 'cov_type', cov_typeX, 'cov_prior', cov_priorX);
+ for c=1:nc
+ assert(is_psd(cwr.SigmaX(:,:,c)))
+ end
+
+ if clamp_weights % affects estimate of mu and Sigma
+ W = cwr.weightsY;
+ else
+ W = [];
+ end
+ [cwr.muY, cwr.SigmaY, cwr.weightsY] = ...
+ clg_Mstep(w, WY, WYY, WYTY, WX, WXX, WXY, ...
+ 'cov_type', cov_typeY, 'clamped_weights', W, ...
+ 'cov_prior', cov_priorY);
+ %'xs', X, 'ys', Y, 'post', post); % debug
+ %a = linspace(min(Y(2,:)), max(Y(2,:)), nc+2);
+ %cwr.muY(2,:) = a(2:end-1);
+
+ cwr.priorC = normalize(w);
+
+ for c=1:nc
+ assert(is_psd(cwr.SigmaY(:,:,c)))
+ end
+
+ % Re-apply any clamps after every M step.
+ if clamp_covY, cwr.SigmaY = SigmaY; end
+ if clamp_covX, cwr.SigmaX = SigmaX; end
+ if clamp_weights, cwr.weightsY = weightsY; end
+
+ if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end
+ num_iter = num_iter + 1;
+ converged = em_converged(loglik, previous_loglik, thresh);
+ previous_loglik = loglik;
+
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cwr_predict.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cwr_predict.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function [mu, Sigma, weights, mask] = cwr_predict(cwr, X, mask_data)
+% CWR_PREDICT cluster weighted regression: predict Y given X
+% function [mu, Sigma] = cwr_predict(cwr, X)
+%
+% mu(:,t) = E[Y|X(:,t)] = sum_c P(c | X(:,t)) E[Y|c, X(:,t)]
+% Sigma(:,:,t) = Cov[Y|X(:,t)]
+%
+% [mu, Sigma, weights, mask] = cwr_predict(cwr, X, mask_data)
+% mask(i) = sum_t sum_c p(mask_data(:,i) | X(:,t), c) P(c|X(:,t))
+% This evaluates the predictive density on a set of points
+% (This is only sensible if T=1, ie. X is a single vector)
+
+[nx T] = size(X);
+[ny nx nc] = size(cwr.weightsY);
+mu = zeros(ny, T);
+Sigma = zeros(ny, ny, T);
+
+% Only compute the mask if the caller asked for the 4th output.
+if nargout == 4
+ comp_mask = 1;
+ N = size(mask_data,2);
+ mask = zeros(N,1);
+else
+ comp_mask = 0;
+end
+
+if nc==1
+ % Single cluster: prediction reduces to a (possibly linear) Gaussian.
+ if isempty(cwr.weightsY)
+ mu = repmat(cwr.muY, 1, T);
+ Sigma = repmat(cwr.SigmaY, [1 1 T]);
+ else
+ mu = repmat(cwr.muY, 1, T) + cwr.weightsY * X;
+ Sigma = repmat(cwr.SigmaY, [1 1 T]);
+ %for t=1:T
+ % mu(:,t) = cwr.muY + cwr.weightsY*X(:,t);
+ % Sigma(:,:,t) = cwr.SigmaY;
+ %end
+ end
+ if comp_mask, mask = gaussian_prob(mask_data, mu, Sigma); end
+ weights = [];
+ return;
+end
+
+
+% likX(c,t) = p(x(:,t) | c)
+likX = mixgauss_prob(X, cwr.muX, cwr.SigmaX);
+weights = normalize(repmat(cwr.priorC, 1, T) .* likX, 1);
+for t=1:T
+ mut = zeros(ny, nc);
+ for c=1:nc
+ mut(:,c) = cwr.muY(:,c) + cwr.weightsY(:,:,c)*X(:,t);
+ if comp_mask
+ % NOTE(review): weights(c) is a linear index into column 1; this is
+ % only correct because the mask is documented as T=1 only.
+ mask = mask + gaussian_prob(mask_data, mut(:,c), cwr.SigmaY(:,:,c)) * weights(c);
+ end
+ end
+ %w = normalise(cwr.priorC(:) .* likX(:,t));
+ % Collapse the per-cluster predictions into a single moment-matched Gaussian.
+ [mu(:,t), Sigma(:,:,t)] = collapse_mog(mut, cwr.SigmaY, weights(:,t));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cwr_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cwr_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function [likXandY, likYgivenX, post] = cwr_prob(cwr, X, Y);
+% CWR_PROB cluster weighted regression: evaluate likelihood of Y given X
+% function [likXandY, likYgivenX, post] = cwr_prob(cwr, X, Y);
+%
+% likXandY(t) = p(x(:,t), y(:,t))
+% likYgivenX(t) = p(y(:,t) | x(:,t))
+% post(c,t) = p(c | x(:,t), y(:,t))
+
+[nx N] = size(X);
+nc = length(cwr.priorC);
+
+if nc == 1
+ % Single cluster: no mixture posterior to compute.
+ % NOTE(review): cwr_em leaves cwr.SigmaX empty when nc==1, so p(x) is
+ % not modelled here and likXandY is actually p(y|x), not the joint --
+ % confirm that callers only use it for relative comparisons.
+ [mu, Sigma] = cwr_predict(cwr, X);
+ likY = gaussian_prob(Y, mu, Sigma);
+ likXandY = likY;
+ likYgivenX = likY;
+ post = ones(1,N);
+ return;
+end
+
+
+% likY(c,t) = p(y(:,t) | c)
+likY = clg_prob(X, Y, cwr.muY, cwr.SigmaY, cwr.weightsY);
+
+% likX(c,t) = p(x(:,t) | c)
+[junk, likX] = mixgauss_prob(X, cwr.muX, cwr.SigmaX);
+likX = squeeze(likX);
+
+% prior(c,t) = p(c)
+prior = repmat(cwr.priorC(:), 1, N);
+
+% Joint over clusters; marginalize over c for the likelihood, normalize
+% over c for the posterior.
+post = likX .* likY .* prior;
+likXandY = sum(post, 1);
+post = post ./ repmat(likXandY, nc, 1);
+%loglik = sum(log(lik));
+%loglik = log(lik);
+
+likX = sum(likX .* prior, 1); % p(x(:,t))
+likYgivenX = likXandY ./ likX;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cwr_readme.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cwr_readme.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+This directory implements Cluster Weighted Regression, as described in
+Neil Gershenfeld, "The nature of mathematical modelling", p182.
+(See also http://www.media.mit.edu/physics/publications/books/nmm/files/index.html)
+
+Written by K. Murphy, 2 May 2003
+
+The model is as follows:
+
+X<--|
+| Q
+v |
+Y<--
+
+where Q is a discrete latent mixture variable.
+
+A mixture of experts has an X->Q arc instead of a Q->X arc;
+the X->Q arc is modelled by a softmax, which is slightly harder to fit than a
+mixture of Gaussians.
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/cwr_test.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/cwr_test.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+% Verify that my code gives the same results as the 1D example at
+% http://www.media.mit.edu/physics/publications/books/nmm/files/cwm.m
+%
+% Runs ONE EM iteration in both implementations from an identical random
+% starting point and asserts the learned parameters match.
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+x = (-10:10)';
+y = double(x > 0);
+npts = length(x);
+plot(x,y,'+')
+
+nclusters = 4;
+nplot = 100;
+xplot = 24*(1:nplot)'/nplot - 12;
+
+% Common random starting point.
+mux = 20*rand(1,nclusters) - 10;
+muy = zeros(1,nclusters);
+varx = ones(1,nclusters);
+vary = ones(1,nclusters);
+pc = 1/nclusters * ones(1,nclusters);
+
+
+I = repmat(eye(1,1), [1 1 nclusters]);
+O = repmat(zeros(1,1), [1 1 nclusters]);
+X = x(:)';
+Y = y(:)';
+
+% Do 1 iteration of EM
+
+%cwr = cwr_em(X, Y, nclusters, 'muX', mux, 'muY', muy, 'SigmaX', I, 'cov_typeX', 'spherical', 'SigmaY', I, 'cov_typeY', 'spherical', 'priorC', pc, 'weightsY', O, 'init_params', 0, 'clamp_weights', 1, 'max_iter', 1, 'cov_priorX', zeros(1,1,nclusters), 'cov_priorY', zeros(1,1,nclusters));
+
+cwr = cwr_em(X, Y, nclusters, 'muX', mux, 'muY', muy, 'SigmaX', I, 'cov_typeX', 'spherical', 'SigmaY', I, 'cov_typeY', 'spherical', 'priorC', pc, 'weightsY', O, 'create_init_params', 0, 'clamp_weights', 1, 'max_iter', 1);
+
+
+% Check this matches Gershenfeld's code
+
+% E step
+% px(t,c) = prob(x(t) | c)
+px = exp(-(kron(x,ones(1,nclusters)) ...
+ - kron(ones(npts,1),mux)).^2 ...
+ ./ (2*kron(ones(npts,1),varx))) ...
+ ./ sqrt(2*pi*kron(ones(npts,1),varx));
+py = exp(-(kron(y,ones(1,nclusters)) ...
+ - kron(ones(npts,1),muy)).^2 ...
+ ./ (2*kron(ones(npts,1),vary))) ...
+ ./ sqrt(2*pi*kron(ones(npts,1),vary));
+p = px .* py .* kron(ones(npts,1),pc);
+pp = p ./ kron(sum(p,2),ones(1,nclusters)); % posterior over clusters
+
+% M step
+eps = 0.01; % variance regularizer; matches cwr_em's default cov prior
+pc2 = sum(pp)/npts;
+
+mux2 = sum(kron(x,ones(1,nclusters)) .* pp) ...
+ ./ (npts*pc2);
+varx2 = eps + sum((kron(x,ones(1,nclusters)) ...
+ - kron(ones(npts,1),mux2)).^2 .* pp) ...
+ ./ (npts*pc2);
+muy2 = sum(kron(y,ones(1,nclusters)) .* pp) ...
+ ./ (npts*pc2);
+vary2 = eps + sum((kron(y,ones(1,nclusters)) ...
+ - kron(ones(npts,1),muy2)).^2 .* pp) ...
+ ./ (npts*pc2);
+
+
+denom = (npts*pc2);
+% denom(c) = N*pc(c) = w(c) = sum_t pp(c,t)
+% since pc(c) = sum_t pp(c,t) / N
+
+% Compare the two implementations' parameter estimates.
+cwr_mux = cwr.muX;
+assert(approxeq(mux2, cwr_mux))
+cwr_SigmaX = squeeze(cwr.SigmaX)';
+assert(approxeq(varx2, cwr_SigmaX))
+
+cwr_muy = cwr.muY;
+assert(approxeq(muy2, cwr_muy))
+cwr_SigmaY = squeeze(cwr.SigmaY)';
+assert(approxeq(vary2, cwr_SigmaY))
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/dirichlet_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/dirichlet_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function theta = dirichlet_sample(alpha, N)
+% DIRICHLET_SAMPLE Sample N vectors from Dir(alpha(1), ..., alpha(k))
+% theta = dirichlet_sample(alpha, N)
+% theta(i,j) = i'th sample of theta_j, where theta ~ Dir
+%
+% All entries of alpha must be strictly positive.
+
+% We use the method from p. 482 of "Bayesian Data Analysis", Gelman et al.:
+% draw independent Gamma(alpha(j), scale) variates and normalize each row
+% to sum to 1; the common scale cancels in the normalization.
+
+% all() makes the vector condition explicit (the bare array assert relied
+% on MATLAB's implicit all-elements-true semantics).
+assert(all(alpha(:) > 0));
+k = length(alpha);
+theta = zeros(N, k);
+scale = 1; % arbitrary
+for i=1:k
+ %theta(:,i) = gamrnd(alpha(i), scale, N, 1);
+ theta(:,i) = gamma_sample(alpha(i), scale, N, 1);
+end
+%theta = mk_stochastic(theta);
+S = sum(theta,2);
+theta = theta ./ repmat(S, 1, k);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/dirichletpdf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/dirichletpdf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function p = dirichletpdf(x, alpha)
+%DIRICHLETPDF Dirichlet probability density function.
+% p = dirichletpdf(x, alpha) returns the probability of vector
+% x under the Dirichlet distribution with parameter vector
+% alpha. Returns 0 (with a warning) if x is not on the
+% probability simplex to within a small tolerance.
+%
+% Author: David Ross
+
+%-------------------------------------------------
+% Check the input
+%-------------------------------------------------
+error(nargchk(2,2,nargin));
+
+% ensure alpha is a vector
+if min(size(alpha)) ~= 1 | ndims(alpha) > 2 | length(alpha) == 1
+ error('alpha must be a vector');
+end
+
+% ensure x is a vector of the same size as alpha
+if any(size(x) ~= size(alpha))
+ error('x and alpha must be the same size');
+end
+
+
+%-------------------------------------------------
+% Main
+%-------------------------------------------------
+if any(x < 0)
+ p = 0;
+elseif abs(sum(x) - 1) > 1e-6
+ % Use a tolerance rather than the exact test sum(x) ~= 1, which
+ % rejected valid simplex points that were off by mere round-off.
+ disp(['dirichletpdf warning: sum(x)~=1, but this may be ' ...
+ 'due to numerical issues']);
+ p = 0;
+else
+ % log of the Dirichlet normalizing constant 1/B(alpha)
+ z = gammaln(sum(alpha)) - sum(gammaln(alpha));
+ z = exp(z);
+
+ p = z * prod(x.^(alpha-1));
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/dirichletrnd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/dirichletrnd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function x = dirichletrnd(alpha)
+%DIRICHLETRND Random vector from a dirichlet distribution.
+% x = dirichletrnd(alpha) returns a vector randomly selected
+% from the Dirichlet distribution with parameter vector alpha.
+%
+% The algorithm used is the following:
+% For each alpha(i), generate a value s(i) with distribution
+% Gamma(alpha(i),1). Now x(i) = s(i) / sum_j s(j).
+%
+% The above algorithm was recounted to me by Radford Neal, but
+% a reference would be appreciated...
+% Do the gamma parameters always have to be 1?
+%
+% Author: David Ross
+% $Id: dirichletrnd.m,v 1.1.1.1 2005/05/22 23:32:12 yozhik Exp $
+
+%-------------------------------------------------
+% Check the input
+%-------------------------------------------------
+% NOTE(review): nargchk is deprecated in newer MATLAB (use narginchk);
+% kept for compatibility with the rest of this toolbox.
+error(nargchk(1,1,nargin));
+
+if min(size(alpha)) ~= 1 | length(alpha) < 2
+ error('alpha must be a vector of length at least 2');
+end
+
+
+%-------------------------------------------------
+% Main
+%-------------------------------------------------
+% Draw independent Gamma(alpha(i),1) variates and normalize.
+% NOTE(review): size(alpha) is passed where gamrnd expects the M (rows)
+% argument; this relies on gamrnd tolerating a size vector there --
+% confirm against gamma_sample/gamrnd in this toolbox.
+gamma_vals = gamrnd(alpha, ones(size(alpha)), size(alpha));
+denom = sum(gamma_vals);
+x = gamma_vals / denom;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/distchck.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/distchck.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,173 @@
+function [errorcode,out1,out2,out3,out4] = distchck(nparms,arg1,arg2,arg3,arg4)
+%DISTCHCK Checks the argument list for the probability functions.
+% [ERRORCODE,OUT1,...,OUT4] = DISTCHCK(NPARMS,ARG1,...,ARG4) checks that
+% the first NPARMS arguments are size-compatible: every non-scalar
+% argument must have identical dimensions. Scalars are expanded to the
+% common matrix size. ERRORCODE is 0 on success, 1 on a size mismatch
+% (in which case the OUTs for that branch may be left unset).
+
+% B.A. Jones 1-22-93
+% Copyright (c) 1993-98 by The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:29:18 $
+
+errorcode = 0;
+
+% One argument: nothing to check or expand.
+if nparms == 1
+ out1 = arg1;
+ return;
+end
+
+% Two arguments: pairwise size check, then expand any scalar.
+if nparms == 2
+ [r1 c1] = size(arg1);
+ [r2 c2] = size(arg2);
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+ if scalararg1
+ out1 = arg1(ones(r2,1),ones(c2,1));
+ else
+ out1 = arg1;
+ end
+ if scalararg2
+ out2 = arg2(ones(r1,1),ones(c1,1));
+ else
+ out2 = arg2;
+ end
+end
+
+% Three arguments: check all pairs of non-scalars, then expand scalars
+% to the maximum size found.
+if nparms == 3
+ [r1 c1] = size(arg1);
+ [r2 c2] = size(arg2);
+ [r3 c3] = size(arg3);
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ scalararg3 = (prod(size(arg3)) == 1);
+
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1 & ~scalararg3
+ if r1 ~= r3 | c1 ~= c3
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg3 & ~scalararg2
+ if r3 ~= r2 | c3 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1
+ out1 = arg1;
+ end
+ if ~scalararg2
+ out2 = arg2;
+ end
+ if ~scalararg3
+ out3 = arg3;
+ end
+ % All non-scalars share a size, so the max over dims is that size.
+ rows = max([r1 r2 r3]);
+ columns = max([c1 c2 c3]);
+
+ if scalararg1
+ out1 = arg1(ones(rows,1),ones(columns,1));
+ end
+ if scalararg2
+ out2 = arg2(ones(rows,1),ones(columns,1));
+ end
+ if scalararg3
+ out3 = arg3(ones(rows,1),ones(columns,1));
+ end
+ out4 =[];
+
+end
+
+% Four arguments: same scheme with all six pairs checked.
+if nparms == 4
+ [r1 c1] = size(arg1);
+ [r2 c2] = size(arg2);
+ [r3 c3] = size(arg3);
+ [r4 c4] = size(arg4);
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ scalararg3 = (prod(size(arg3)) == 1);
+ scalararg4 = (prod(size(arg4)) == 1);
+
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1 & ~scalararg3
+ if r1 ~= r3 | c1 ~= c3
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1 & ~scalararg4
+ if r1 ~= r4 | c1 ~= c4
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg3 & ~scalararg2
+ if r3 ~= r2 | c3 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg4 & ~scalararg2
+ if r4 ~= r2 | c4 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg3 & ~scalararg4
+ if r3 ~= r4 | c3 ~= c4
+ errorcode = 1;
+ return;
+ end
+ end
+
+
+ if ~scalararg1
+ out1 = arg1;
+ end
+ if ~scalararg2
+ out2 = arg2;
+ end
+ if ~scalararg3
+ out3 = arg3;
+ end
+ if ~scalararg4
+ out4 = arg4;
+ end
+
+ rows = max([r1 r2 r3 r4]);
+ columns = max([c1 c2 c3 c4]);
+ if scalararg1
+ out1 = arg1(ones(rows,1),ones(columns,1));
+ end
+ if scalararg2
+ out2 = arg2(ones(rows,1),ones(columns,1));
+ end
+ if scalararg3
+ out3 = arg3(ones(rows,1),ones(columns,1));
+ end
+ if scalararg4
+ out4 = arg4(ones(rows,1),ones(columns,1));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/eigdec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/eigdec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [evals, evec] = eigdec(x, N)
+%EIGDEC Sorted eigendecomposition
+%
+% Description
+% EVALS = EIGDEC(X, N) computes the largest N eigenvalues of the
+% matrix X in descending order. [EVALS, EVEC] = EIGDEC(X, N) also
+% computes the corresponding eigenvectors.
+%
+% See also
+% PCA, PPCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Only compute eigenvectors when the caller asks for them.
+if nargout == 1
+ evals_only = logical(1);
+else
+ evals_only = logical(0);
+end
+
+if N ~= round(N) | N < 1 | N > size(x, 2)
+ error('Number of PCs must be integer, >0, < dim');
+end
+
+% Find the eigenvalues of the data covariance matrix
+if evals_only
+ % Use eig function as always more efficient than eigs here
+ temp_evals = eig(x);
+else
+ % Use eig function unless fraction of eigenvalues required is tiny
+ if (N/size(x, 2)) > 0.04
+ fprintf('netlab pca: using eig\n');
+ [temp_evec, temp_evals] = eig(x);
+ else
+ options.disp = 0; % suppress eigs' iteration output
+ fprintf('netlab pca: using eigs\n');
+ [temp_evec, temp_evals] = eigs(x, N, 'LM', options);
+ end
+ temp_evals = diag(temp_evals);
+end
+
+% Eigenvalues nearly always returned in descending order, but just
+% to make sure.....
+[evals perm] = sort(-temp_evals); % sort negated values => descending order
+evals = -evals(1:N);
+%evec=temp_evec(:,1:N);
+if ~evals_only
+ if evals == temp_evals(1:N)
+ % Originals were in order
+ evec = temp_evec(:, 1:N);
+ return
+ else
+ fprintf('netlab pca: sorting evec\n');
+ % Need to reorder the eigenvectors
+ for i=1:N
+ evec(:,i) = temp_evec(:,perm(i));
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/est_transmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/est_transmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function [A,C] = est_transmat(seq)
+% ESTIMATE_TRANSMAT Max likelihood of a Markov chain transition matrix
+% [A,C] = estimate_transmat(seq)
+%
+% seq is a vector of positive integers
+%
+% e.g., seq = [1 2 1 2 3], C(1,2)=2, C(2,1)=1, C(2,3)=1, so
+% A(1,:)=[0 1 0], A(2,:) = [0.5 0 0.5],
+% all other entries are 0
+%
+% C(i,j) = number of observed i->j transitions; A is C row-normalized.
+
+% Use a trick with sparse matrices to count the number of each transition.
+% From http://www.mathworks.com/company/newsletter/may03/dna.shtml
+
+% Pass an explicit size so C is always square (nstates x nstates), even
+% when the largest state label never appears as a transition source or
+% destination: without it, e.g. seq = [1 2 3] yields a 2x3 count matrix.
+nstates = max(seq);
+C = full(sparse(seq(1:end-1), seq(2:end), 1, nstates, nstates));
+A = mk_stochastic(C);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/fit_paritioned_model_testfn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/fit_paritioned_model_testfn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function model = foo(inputs, outputs, varargin)
+% FOO Trivial model-fitting callback used to test fit_partitioned_model.
+% Simply records the input and output columns it was handed.
+%
+% NOTE(review): this stores fields 'inputs'/'outputs', but the demo text
+% in fit_partitioned_model.m reads model{i}.input/.output (singular) --
+% one of the two should be brought into agreement.
+
+model.inputs = inputs;
+model.outputs = outputs;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/fit_partitioned_model.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/fit_partitioned_model.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [model, partition_size] = fit_partitioned_model(...
+ inputs, outputs, selectors, sel_sizes, min_size, partition_names, fn_name, varargin)
+%function [models, partition_sizes] = fit_partitioned_model(...
+% inputs, outputs, selectors, sel_sizes, min_size, partition_names, fn_name, varargin)
+%
+% Fit models to different subsets (columns) of the input/output data,
+% as chosen by the selectors matrix. If there is only output data, set input=[].
+% If there is less than min_size data in partition i,
+% we set model{i} = []
+%
+% Example:
+% selectors = [1 2 1 1 1
+% 1 2 2 1 2]
+% sel_sizes = [2 2] so there are 4 models: (1,1), (2,1), (1,2), (2,2)
+% We fit model{1} to data from columns 1,4
+% We fit model{2} to no data
+% We fit model{3} to data from column 3,5
+% We fit model{4} to data from column 2 (assuming min_size <= 1)
+%
+% For each partition, we call the specified function with the specified arguments
+% as follows:
+% model{i} = fn(input(:,cols{i}), output(:,cols{i}), args)
+% (We omit input if [])
+% partition_size(i) is the amount of data in the i'th partition.
+%
+% Example use: row 1 of selectors is whether an object is present/absent
+% and row 2 is the location.
+%
+% Demo:
+% inputs = 1:5; outputs = 6:10; selectors = as above
+% fn = 'fit_partitioned_model_testfn';
+% [model, partition_size] = fit_partitioned_model(inputs, outputs, selectors, [2 2], fn)
+% should produce
+% model{1}.input = [1 4], model{1}.output = [6 9]
+% model{2} = []
+% model{3}.input = [3 5], model{3}.output = [8 10],
+% model{4}.input = [2], model{3}.output = [7],
+% partition_size = [2 0 2 1]
+%
+% NOTE(review): the demo call above passes 5 positional args, but the
+% signature requires min_size and partition_names before fn_name.
+
+
+% Map each column's selector tuple to a single linear partition index.
+sel_ndx = subv2ind(sel_sizes, selectors');
+Nmodels = prod(sel_sizes);
+model = cell(1, Nmodels);
+partition_size = zeros(1, Nmodels);
+for m=1:Nmodels
+ ndx = find(sel_ndx==m);
+ partition_size(m) = length(ndx);
+ % NOTE(review): the size-filter condition is commented out, so this
+ % prints for every partition whenever partition_names is supplied.
+ if ~isempty(partition_names) % & (partition_size(m) < min_size)
+ fprintf('partition %s has size %d, min size = %d\n', ...
+ partition_names{m}, partition_size(m), min_size);
+ end
+ if partition_size(m) >= min_size
+ if isempty(inputs)
+ model{m} = feval(fn_name, outputs(:, ndx), varargin{:});
+ else
+ model{m} = feval(fn_name, inputs(:,ndx), outputs(:, ndx), varargin{:});
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/gamma_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/gamma_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,126 @@
+function r = gamrnd(a,b,m,n);
+%GAMRND Random matrices from gamma distribution.
+% R = GAMRND(A,B) returns a matrix of random numbers chosen
+% from the gamma distribution with parameters A and B.
+% The size of R is the common size of A and B if both are matrices.
+% If either parameter is a scalar, the size of R is the size of the other
+% parameter. Alternatively, R = GAMRND(A,B,M,N) returns an M by N matrix.
+%
+% Some references refer to the gamma distribution
+% with a single parameter. This corresponds to GAMRND
+% with B = 1. (See Devroye, pages 401-402.)
+
+% GAMRND uses a rejection or an inversion method depending on the
+% value of A: exact exponential inversion for A == 1, Johnk's
+% generator for 0 < A < 1, and Best's rejection method for A > 1.
+
+% References:
+% [1] L. Devroye, "Non-Uniform Random Variate Generation",
+% Springer-Verlag, 1986
+
+% B.A. Jones 2-1-93
+% Copyright (c) 1993-98 by The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:29:18 $
+
+if nargin < 2,
+ error('Requires at least two input arguments.');
+end
+
+
+% rndcheck validates the size arguments and reports the output shape.
+if nargin == 2
+ [errorcode rows columns] = rndcheck(2,2,a,b);
+end
+
+if nargin == 3
+ [errorcode rows columns] = rndcheck(3,2,a,b,m);
+end
+
+if nargin == 4
+ [errorcode rows columns] = rndcheck(4,2,a,b,m,n);
+end
+
+if errorcode > 0
+ error('Size information is inconsistent.');
+end
+
+% Initialize r to zero; work on flattened vectors and reshape at the end.
+lth = rows*columns;
+r = zeros(lth,1);
+a = a(:); b = b(:);
+
+% Expand scalar parameters so a(i), b(i) exist for every output element.
+scalara = (length(a) == 1);
+if scalara
+ a = a*ones(lth,1);
+end
+
+scalarb = (length(b) == 1);
+if scalarb
+ b = b*ones(lth,1);
+end
+
+% If a == 1, then gamma is exponential. (Devroye, page 405).
+k = find(a == 1);
+if any(k)
+ r(k) = -b(k) .* log(rand(size(k)));
+end
+
+
+k = find(a < 1 & a > 0);
+% (Devroye, page 418 Johnk's generator)
+if any(k)
+ c = zeros(lth,1);
+ d = zeros(lth,1);
+ c(k) = 1 ./ a(k);
+ d(k) = 1 ./ (1 - a(k));
+ accept = k;
+ % Loop until every pending element has produced an accepted draw.
+ while ~isempty(accept)
+ u = rand(size(accept));
+ v = rand(size(accept));
+ x = u .^ c(accept);
+ y = v .^ d(accept);
+ k1 = find((x + y) <= 1);
+ if ~isempty(k1)
+ e = -log(rand(size(k1)));
+ r(accept(k1)) = e .* x(k1) ./ (x(k1) + y(k1));
+ accept(k1) = [];
+ end
+ end
+ r(k) = r(k) .* b(k);
+end
+
+% Use a rejection method for a > 1.
+k = find(a > 1);
+% (Devroye, page 410 Best's algorithm)
+bb = zeros(size(a));
+c = bb;
+if any(k)
+ bb(k) = a(k) - 1;
+ c(k) = 3 * a(k) - 3/4;
+ accept = k;
+ while ~isempty(accept)
+ m = length(accept);
+ u = rand(m,1);
+ v = rand(m,1);
+ w = u .* (1 - u);
+ y = sqrt(c(accept) ./ w) .* (u - 0.5);
+ x = bb(accept) + y;
+ k1 = find(x >= 0);
+ if ~isempty(k1)
+ z = 64 * (w .^ 3) .* (v .^ 2);
+ % Fast squeeze test first; fall back to the exact log test.
+ k2 = (z(k1) <= (1 - 2 * (y(k1) .^2) ./ x(k1)));
+ k3 = k1(find(k2));
+ r(accept(k3)) = x(k3);
+ k4 = k1(find(~k2));
+ k5 = k4(find(log(z(k4)) <= (2*(bb(accept(k4)).*log(x(k4)./bb(accept(k4)))-y(k4)))));
+ r(accept(k5)) = x(k5);
+ omit = [k3; k5];
+ accept(omit) = [];
+ end
+ end
+ r(k) = r(k) .* b(k);
+end
+
+% Return NaN if a or b is not positive.
+r(b <= 0 | a <= 0) = NaN;
+
+r = reshape(r,rows,columns);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/gaussian_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/gaussian_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function p = gaussian_prob(x, m, C, use_log)
+% GAUSSIAN_PROB Evaluate a multivariate Gaussian density.
+% p = gaussian_prob(X, m, C)
+% p(i) = N(X(:,i), m, C) where C = covariance matrix and each COLUMN of x is a datavector
+
+% p = gaussian_prob(X, m, C, 1) returns log N(X(:,i), m, C) (to prevent underflow).
+%
+% If X has size dxN, then p has size Nx1, where N = number of examples
+
+if nargin < 4, use_log = 0; end
+
+if length(m)==1 % scalar
+ x = x(:)';
+end
+[d N] = size(x);
+%assert(length(m)==d); % slow
+m = m(:);
+M = m*ones(1,N); % replicate the mean across columns
+denom = (2*pi)^(d/2)*sqrt(abs(det(C)));
+mahal = sum(((x-M)'*inv(C)).*(x-M)',2); % Chris Bregler's trick
+if any(mahal<0)
+ warning('mahal < 0 => C is not psd')
+end
+if use_log
+ p = -0.5*mahal - log(denom);
+else
+ p = exp(-0.5*mahal) / (denom+eps);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/gaussian_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/gaussian_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function x = gsamp(mu, covar, nsamp)
+%GSAMP Sample from a Gaussian distribution.
+%
+% Description
+%
+% X = GSAMP(MU, COVAR, NSAMP) generates a sample of size NSAMP from a
+% D-dimensional Gaussian distribution. The Gaussian density has mean
+% vector MU and covariance matrix COVAR, and the matrix X has NSAMP
+% rows in which each row represents a D-dimensional sample vector.
+%
+% See also
+% GAUSS, DEMGAUSS
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+d = size(covar, 1);
+
+mu = reshape(mu, 1, d); % Ensure that mu is a row vector
+
+[evec, eval] = eig(covar);
+
+coeffs = randn(nsamp, d)*sqrt(eval);
+
+x = ones(nsamp, 1)*mu + coeffs*evec';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/histCmpChi2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/histCmpChi2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function d = histCmpChi2(h1, h2)
+% Compare two histograms using chi-squared
+% function d = histCmpChi2(h1, h2)
+%
+% d(i,j) = chi^2(h1(i,:), h2(j,:)) = sum_b (h1(i,b)-h2(j,b))^2 / (h1(i,b) + h2(j,b))
+
+[N B] = size(h1);
+d = zeros(N,N);
+for i=1:N
+ h1i = repmat(h1(i,:), N, 1);
+ numer = (h1i - h2).^2;
+ denom = h1i + h2 + eps; % if denom=0, then numer=0
+ d(i,:) = sum(numer ./ denom, 2);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/linear_regression.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/linear_regression.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+function [muY, SigmaY, weightsY] = linear_regression(X, Y, varargin)
+% LINEAR_REGRESSION Fit params for P(Y|X) = N(Y; W X + mu, Sigma)
+%
+% X(:, t) is the t'th input example
+% Y(:, t) is the t'th output example
+%
+% Kevin Murphy, August 2003
+%
+% This is a special case of cwr_em with 1 cluster.
+% You can also think of it as a front end to clg_Mstep.
+
+[cov_typeY, clamp_weights, muY, SigmaY, weightsY,...
+ cov_priorY, regress, clamp_covY] = process_options(...
+ varargin, ...
+ 'cov_typeY', 'full', 'clamp_weights', 0, ...
+ 'muY', [], 'SigmaY', [], 'weightsY', [], ...
+ 'cov_priorY', [], 'regress', 1, 'clamp_covY', 0);
+
+[nx N] = size(X);
+[ny N2] = size(Y);
+if N ~= N2
+ error(sprintf('nsamples X (%d) ~= nsamples Y (%d)', N, N2));
+end
+
+w = 1/N;
+WYbig = Y*w;
+WYY = WYbig * Y';
+WY = sum(WYbig, 2);
+WYTY = sum(diag(WYbig' * Y));
+if ~regress
+ % This is just fitting an unconditional Gaussian
+ weightsY = [];
+ [muY, SigmaY] = ...
+ mixgauss_Mstep(1, WY, WYY, WYTY, ...
+ 'cov_type', cov_typeY, 'cov_prior', cov_priorY);
+ % There is a much easier way...
+ assert(approxeq(muY, mean(Y')))
+ assert(approxeq(SigmaY, cov(Y') + 0.01*eye(ny)))
+else
+ % This is just linear regression
+ WXbig = X*w;
+ WXX = WXbig * X';
+ WX = sum(WXbig, 2);
+ WXTX = sum(diag(WXbig' * X));
+ WXY = WXbig * Y';
+ [muY, SigmaY, weightsY] = ...
+ clg_Mstep(1, WY, WYY, WYTY, WX, WXX, WXY, ...
+ 'cov_type', cov_typeY, 'cov_prior', cov_priorY);
+end
+if clamp_covY, SigmaY = SigmaY; end
+if clamp_weights, weightsY = weightsY; end
+
+if nx==1 & ny==1 & regress
+ P = polyfit(X,Y,1); % fit degree-1 poly: Y = P(1) X^1 + P(2) X^0 = ax + b
+ assert(approxeq(muY, P(2)))
+ assert(approxeq(weightsY, P(1)))
+end
+
+%%%%%%%% Test
+if 0
+ c1 = randn(2,100); c2 = randn(2,100);
+ y = c2(1,:); X = [ones(size(c1,2),1) c1'];
+ b = regress(y(:), X); % stats toolbox
+ [m,s,w] = linear_regression(c1, y);
+ assert(approxeq(b(1),m))
+ assert(approxeq(b(2), w(1)))
+ assert(approxeq(b(3), w(2)))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logist2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logist2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,115 @@
+function [beta,p,lli] = logist2(y,x,w)
+% [beta,p,lli] = logist2(y,x)
+%
+% 2-class logistic regression.
+%
+% INPUT
+% y Nx1 colum vector of 0|1 class assignments
+% x NxK matrix of input vectors as rows
+% [w] Nx1 vector of sample weights
+%
+% OUTPUT
+% beta Kx1 column vector of model coefficients
+% p Nx1 column vector of fitted class 1 posteriors
+% lli log likelihood
+%
+% Class 1 posterior is 1 / (1 + exp(-x*beta))
+%
+% David Martin
+% April 16, 2002
+
+% Copyright (C) 2002 David R. Martin
+%
+% This program is free software; you can redistribute it and/or
+% modify it under the terms of the GNU General Public License as
+% published by the Free Software Foundation; either version 2 of the
+% License, or (at your option) any later version.
+%
+
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+% 02111-1307, USA, or see http://www.gnu.org/copyleft/gpl.html.
+
+error(nargchk(2,3,nargin));
+
+% check inputs
+if size(y,2) ~= 1,
+ error('Input y not a column vector.');
+end
+if size(y,1) ~= size(x,1),
+ error('Input x,y sizes mismatched.');
+end
+
+% get sizes
+[N,k] = size(x);
+
+% if sample weights weren't specified, set them to 1
+if nargin < 3,
+ w = 1;
+end
+
+% normalize sample weights so max is 1
+w = w / max(w);
+
+% initial guess for beta: all zeros
+beta = zeros(k,1);
+
+% Newton-Raphson via IRLS,
+% taken from Hastie/Tibshirani/Friedman Section 4.4.
+iter = 0;
+lli = 0;
+while 1==1,
+ iter = iter + 1;
+
+ % fitted probabilities
+ p = 1 ./ (1 + exp(-x*beta));
+
+ % log likelihood
+ lli_prev = lli;
+ lli = sum( w .* (y.*log(p+eps) + (1-y).*log(1-p+eps)) );
+
+ % least-squares weights
+ wt = w .* p .* (1-p);
+
+ % derivatives of likelihood w.r.t. beta
+ deriv = x'*(w.*(y-p));
+
+ % Hessian of likelihood w.r.t. beta
+ % hessian = x'Wx, where W=diag(w)
+ % Do it this way to be memory efficient and fast.
+ hess = zeros(k,k);
+ for i = 1:k,
+ wxi = wt .* x(:,i);
+ for j = i:k,
+ hij = wxi' * x(:,j);
+ hess(i,j) = -hij;
+ hess(j,i) = -hij;
+ end
+ end
+
+ % make sure Hessian is well conditioned
+ if (rcond(hess) < eps),
+ error(['Stopped at iteration ' num2str(iter) ...
+ ' because Hessian is poorly conditioned.']);
+ break;
+ end;
+
+ % Newton-Raphson update step
+ step = hess\deriv;
+ beta = beta - step;
+
+ % termination criterion based on derivatives
+ tol = 1e-6;
+ if abs(deriv'*step/k) < tol, break; end;
+
+ % termination criterion based on log likelihood
+% tol = 1e-4;
+% if abs((lli-lli_prev)/(lli+lli_prev)) < 0.5*tol, break; end;
+end;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logist2Apply.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logist2Apply.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function p = logist2Apply(beta, x)
+% LOGIST2APPLY 2 class logistic regression: compute posterior prob of class 1
+% function p = logist2Apply(beta, x)
+%
+% x(:,i) - each COLUMN is a test case; we append 1s automatically, if appropriate
+
+[D Ncases] = size(x);
+if length(beta)==D+1
+ F = [x; ones(1,Ncases)];
+else
+ F = x;
+end
+p = 1./(1+exp(-beta(:)'*F));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logist2ApplyRegularized.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logist2ApplyRegularized.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function prob = logist2ApplyRegularized(net, features)
+
+prob = glmfwd(net, features')';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logist2Fit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logist2Fit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [beta, p] = logist2Fit(y, x, addOne, w)
+% LOGIST2FIT 2 class logsitic classification
+% function beta = logist2Fit(y,x, addOne)
+%
+% y(i) = 0/1
+% x(:,i) = i'th input - we optionally append 1s to last dimension
+% w(i) = optional weight
+%
+% beta(j)- regression coefficient
+
+if nargin < 3, addOne = 1; end
+if nargin < 4, w = 1; end
+
+Ncases = size(x,2);
+if Ncases ~= length(y)
+ error(sprintf('size of data = %dx%d, size of labels=%d', size(x,1), size(x,2), length(y)))
+end
+if addOne
+ x = [x; ones(1,Ncases)];
+end
+[beta, p] = logist2(y(:), x', w(:));
+beta = beta(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logist2FitRegularized.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logist2FitRegularized.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function [net, niter] = logist2FitRegularized(labels, features, maxIter)
+
+if nargin < 3, maxIter = 100; end
+
+[D N] = size(features);
+weightPrior = 0.5;
+net = glm(D, 1, 'logistic', weightPrior);
+options = foptions;
+options(14) = maxIter;
+[net, options] = glmtrain(net, options, features', labels(:));
+niter = options(14);
+%w = logist2Fit(labelsPatches(jValidPatches), features(:, jValidPatches));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logistK.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logistK.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,287 @@
+function [beta,post,lli] = logistK(x,y,w,beta)
+% [beta,post,lli] = logistK(x,y,w,beta)
+%
+% k-class logistic regression with optional sample weights
+%
+% k = number of classes
+% n = number of samples
+% d = dimensionality of samples
+%
+% INPUT
+% x dxn matrix of n input column vectors
+% y kxn vector of class assignments
+% [w] 1xn vector of sample weights
+% [beta] dxk matrix of model coefficients
+%
+% OUTPUT
+% beta dxk matrix of fitted model coefficients
+% (beta(:,k) are fixed at 0)
+% post kxn matrix of fitted class posteriors
+% lli log likelihood
+%
+% Let p(i,j) = exp(beta(:,j)'*x(:,i)),
+% Class j posterior for observation i is:
+% post(j,i) = p(i,j) / (p(i,1) + ... p(i,k))
+%
+% See also logistK_eval.
+%
+% David Martin
+% May 3, 2002
+
+% Copyright (C) 2002 David R. Martin
+%
+% This program is free software; you can redistribute it and/or
+% modify it under the terms of the GNU General Public License as
+% published by the Free Software Foundation; either version 2 of the
+% License, or (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+% 02111-1307, USA, or see http://www.gnu.org/copyleft/gpl.html.
+
+% TODO - this code would be faster if x were transposed
+
+error(nargchk(2,4,nargin));
+
+debug = 0;
+if debug>0,
+ h=figure(1);
+ set(h,'DoubleBuffer','on');
+end
+
+% get sizes
+[d,nx] = size(x);
+[k,ny] = size(y);
+
+% check sizes
+if k < 2,
+ error('Input y must encode at least 2 classes.');
+end
+if nx ~= ny,
+ error('Inputs x,y not the same length.');
+end
+
+n = nx;
+
+% make sure class assignments have unit L1-norm
+sumy = sum(y,1);
+if any(abs(1-sumy) > eps),
+ sumy = sum(y,1);
+ for i = 1:k, y(i,:) = y(i,:) ./ sumy; end
+end
+clear sumy;
+
+% if sample weights weren't specified, set them to 1
+if nargin < 3,
+ w = ones(1,n);
+end
+
+% normalize sample weights so max is 1
+w = w / max(w);
+
+% if starting beta wasn't specified, initialize randomly
+if nargin < 4,
+ beta = 1e-3*rand(d,k);
+ beta(:,k) = 0; % fix beta for class k at zero
+else
+ if sum(beta(:,k)) ~= 0,
+ error('beta(:,k) ~= 0');
+ end
+end
+
+stepsize = 1;
+minstepsize = 1e-2;
+
+post = computePost(beta,x);
+lli = computeLogLik(post,y,w);
+
+for iter = 1:100,
+ %disp(sprintf(' logist iter=%d lli=%g',iter,lli));
+ vis(x,y,beta,lli,d,k,iter,debug);
+
+ % gradient and hessian
+ [g,h] = derivs(post,x,y,w);
+
+ % make sure Hessian is well conditioned
+ if rcond(h) < eps,
+ % condition with Levenberg-Marquardt method
+ for i = -16:16,
+ h2 = h .* ((1 + 10^i)*eye(size(h)) + (1-eye(size(h))));
+ if rcond(h2) > eps, break, end
+ end
+ if rcond(h2) < eps,
+ warning(['Stopped at iteration ' num2str(iter) ...
+ ' because Hessian can''t be conditioned']);
+ break
+ end
+ h = h2;
+ end
+
+ % save lli before update
+ lli_prev = lli;
+
+ % Newton-Raphson with step-size halving
+ while stepsize >= minstepsize,
+ % Newton-Raphson update step
+ step = stepsize * (h \ g);
+ beta2 = beta;
+ beta2(:,1:k-1) = beta2(:,1:k-1) - reshape(step,d,k-1);
+
+ % get the new log likelihood
+ post2 = computePost(beta2,x);
+ lli2 = computeLogLik(post2,y,w);
+
+ % if the log likelihood increased, then stop
+ if lli2 > lli,
+ post = post2; lli = lli2; beta = beta2;
+ break
+ end
+
+ % otherwise, reduce step size by half
+ stepsize = 0.5 * stepsize;
+ end
+
+ % stop if the average log likelihood has gotten small enough
+ if 1-exp(lli/n) < 1e-2, break, end
+
+ % stop if the log likelihood changed by a small enough fraction
+ dlli = (lli_prev-lli) / lli;
+ if abs(dlli) < 1e-3, break, end
+
+ % stop if the step size has gotten too small
+ if stepsize < minstepsize, break, end
+
+ % stop if the log likelihood has decreased; this shouldn't happen
+ if lli < lli_prev,
+ warning(['Stopped at iteration ' num2str(iter) ...
+ ' because the log likelihood decreased from ' ...
+ num2str(lli_prev) ' to ' num2str(lli) '.' ...
+ ' This may be a bug.']);
+ break
+ end
+end
+
+if debug>0,
+ vis(x,y,beta,lli,d,k,iter,2);
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% class posteriors
+function post = computePost(beta,x)
+ [d,n] = size(x);
+ [d,k] = size(beta);
+ post = zeros(k,n);
+ bx = zeros(k,n);
+ for j = 1:k,
+ bx(j,:) = beta(:,j)'*x;
+ end
+ for j = 1:k,
+ post(j,:) = 1 ./ sum(exp(bx - repmat(bx(j,:),k,1)),1);
+ end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% log likelihood
+function lli = computeLogLik(post,y,w)
+ [k,n] = size(post);
+ lli = 0;
+ for j = 1:k,
+ lli = lli + sum(w.*y(j,:).*log(post(j,:)+eps));
+ end
+ if isnan(lli),
+ error('lli is nan');
+ end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% gradient and hessian
+%% These are computed in what seems a verbose manner, but it is
+%% done this way to use minimal memory. x should be transposed
+%% to make it faster.
+function [g,h] = derivs(post,x,y,w)
+
+ [k,n] = size(post);
+ [d,n] = size(x);
+
+ % first derivative of likelihood w.r.t. beta
+ g = zeros(d,k-1);
+ for j = 1:k-1,
+ wyp = w .* (y(j,:) - post(j,:));
+ for ii = 1:d,
+ g(ii,j) = x(ii,:) * wyp';
+ end
+ end
+ g = reshape(g,d*(k-1),1);
+
+ % hessian of likelihood w.r.t. beta
+ h = zeros(d*(k-1),d*(k-1));
+ for i = 1:k-1, % diagonal
+ wt = w .* post(i,:) .* (1 - post(i,:));
+ hii = zeros(d,d);
+ for a = 1:d,
+ wxa = wt .* x(a,:);
+ for b = a:d,
+ hii_ab = wxa * x(b,:)';
+ hii(a,b) = hii_ab;
+ hii(b,a) = hii_ab;
+ end
+ end
+ h( (i-1)*d+1 : i*d , (i-1)*d+1 : i*d ) = -hii;
+ end
+ for i = 1:k-1, % off-diagonal
+ for j = i+1:k-1,
+ wt = w .* post(j,:) .* post(i,:);
+ hij = zeros(d,d);
+ for a = 1:d,
+ wxa = wt .* x(a,:);
+ for b = a:d,
+ hij_ab = wxa * x(b,:)';
+ hij(a,b) = hij_ab;
+ hij(b,a) = hij_ab;
+ end
+ end
+ h( (i-1)*d+1 : i*d , (j-1)*d+1 : j*d ) = hij;
+ h( (j-1)*d+1 : j*d , (i-1)*d+1 : i*d ) = hij;
+ end
+ end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% debug/visualization
+function vis (x,y,beta,lli,d,k,iter,debug)
+
+ if debug<=0, return, end
+
+ disp(['iter=' num2str(iter) ' lli=' num2str(lli)]);
+ if debug<=1, return, end
+
+ if d~=3 | k>10, return, end
+
+ figure(1);
+ res = 100;
+ r = abs(max(max(x)));
+ dom = linspace(-r,r,res);
+ [px,py] = meshgrid(dom,dom);
+ xx = px(:); yy = py(:);
+ points = [xx' ; yy' ; ones(1,res*res)];
+ func = zeros(k,res*res);
+ for j = 1:k,
+ func(j,:) = exp(beta(:,j)'*points);
+ end
+ [mval,ind] = max(func,[],1);
+ hold off;
+ im = reshape(ind,res,res);
+ imagesc(xx,yy,im);
+ hold on;
+ syms = {'w.' 'wx' 'w+' 'wo' 'w*' 'ws' 'wd' 'wv' 'w^' 'w<'};
+ for j = 1:k,
+ [mval,ind] = max(y,[],1);
+ ind = find(ind==j);
+ plot(x(1,ind),x(2,ind),syms{j});
+ end
+ pause(0.1);
+
+% eof
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/logistK_eval.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/logistK_eval.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,83 @@
+function [post,lik,lli] = logistK_eval(beta,x,y)
+% [post,lik,lli] = logistK_eval(beta,x,y)
+%
+% Evaluate logistic regression model.
+%
+% INPUT
+% beta dxk model coefficients (as returned by logistK)
+% x dxn matrix of n input column vectors
+% [y] kxn vector of class assignments
+%
+% OUTPUT
+% post kxn fitted class posteriors
+% lik 1xn vector of sample likelihoods
+% lli log likelihood
+%
+% Let p(i,j) = exp(beta(:,j)'*x(:,i)),
+% Class j posterior for observation i is:
+% post(j,i) = p(i,j) / (p(i,1) + ... p(i,k))
+% The likelihood of observation i given soft class assignments
+% y(:,i) is:
+% lik(i) = prod(post(:,i).^y(:,i))
+% The log-likelihood of the model given the labeled samples is:
+% lli = sum(log(lik))
+%
+% See also logistK.
+%
+% David Martin
+% May 7, 2002

+% Copyright (C) 2002 David R. Martin
+%
+% This program is free software; you can redistribute it and/or
+% modify it under the terms of the GNU General Public License as
+% published by the Free Software Foundation; either version 2 of the
+% License, or (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+% 02111-1307, USA, or see http://www.gnu.org/copyleft/gpl.html.
+
+error(nargchk(2,3,nargin));
+
+% check sizes
+if size(beta,1) ~= size(x,1),
+ error('Inputs beta,x not the same height.');
+end
+if nargin > 2 & size(y,2) ~= size(x,2),
+ error('Inputs x,y not the same length.');
+end
+
+% get sizes
+[d,k] = size(beta);
+[d,n] = size(x);
+
+% class posteriors
+post = zeros(k,n);
+bx = zeros(k,n);
+for j = 1:k,
+ bx(j,:) = beta(:,j)'*x;
+end
+for j = 1:k,
+ post(j,:) = 1 ./ sum(exp(bx - repmat(bx(j,:),k,1)),1);
+end
+clear bx;
+
+% likelihood of each sample
+if nargout > 1,
+ y = y ./ repmat(sum(y,1),k,1); % L1-normalize class assignments
+ lik = prod(post.^y,1);
+end
+
+% total log likelihood
+if nargout > 2,
+ lli = sum(log(lik+eps));
+end;
+
+% eof
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/marginalize_gaussian.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/marginalize_gaussian.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function [muX, SXX] = marginalize_gaussian(mu, Sigma, X, Y, ns)
+% MARGINALIZE_GAUSSIAN Compute Pr(X) from Pr(X,Y) where X and Y are jointly Gaussian.
+% [muX, SXX] = marginalize_gaussian(mu, Sigma, X, Y, ns)
+
+[muX, muY, SXX, SXY, SYX, SYY] = partition_matrix_vec(mu, Sigma, X, Y, ns);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/matrix_T_pdf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/matrix_T_pdf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function p = matrix_T_pdf(A, M, V, K, n)
+% MATRIX_T_PDF Evaluate the density of a matrix under a Matrix-T distribution
+% p = matrix_T_pdf(A, M, V, K, n)
+
+% See "Bayesian Linear Regression", T. Minka, MIT Tech Report, 2001
+
+[d m] = size(K);
+is = 1:d;
+c1 = prod(gamma((n+1-is)/2)) / prod(gamma((n-m+1-is)/2));
+c2 = det(K)^(d/2) / det(pi*V)^(m/2); %% pi or 2pi?
+p = c1 * c2 * det((A-M)'*inv(V)*(A-M)*K + eye(m))^(-n/2);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/matrix_normal_pdf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/matrix_normal_pdf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function p = matrix_normal_pdf(A, M, V, K)
+% MATRIX_NORMAL_PDF Evaluate the density of a matrix under a Matrix-Normal distribution
+% p = matrix_normal_pdf(A, M, V, K)
+
+% See "Bayesian Linear Regression", T. Minka, MIT Tech Report, 2001
+
+[d m] = size(K);
+c = det(K)^(d/2) / det(2*pi*V)^(m/2);
+p = c * exp(-0.5*tr((A-M)'*inv(V)*(A-M)*K));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mc_stat_distrib.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mc_stat_distrib.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function pi = mc_stat_distrib(P)
+% MC_STAT_DISTRIB Compute stationary distribution of a Markov chain
+% function pi = mc_stat_distrib(P)
+%
+% Each row of P should sum to one; pi is a column vector
+
+% Kevin Murphy, 16 Feb 2003
+
+% The stationary distribution pi satisfies pi P = pi
+% subject to sum_i pi(i) = 1, 0 <= pi(i) <= 1
+% Hence
+% (P' 0n (pi = (pi
+% 1n 0) 1) 1)
+% or P2 pi2 = pi2.
+% Naively we can solve this using (P2 - I(n+1)) pi2 = 0(n+1)
+% or P3 pi2 = 0(n+1), i.e., pi2 = P3 \ zeros(n+1,1)
+% but this is singular (because of the sum-to-one constraint).
+% Hence we replace the last row of P' with 1s instead of appending ones to create P2,
+% and similarly for pi.
+
+n = length(P);
+P4 = P'-eye(n);
+P4(end,:) = 1;
+pi = P4 \ [zeros(n-1,1);1];
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_Mstep.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_Mstep.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,106 @@
+function [mu, Sigma] = mixgauss_Mstep(w, Y, YY, YTY, varargin)
+% MSTEP_COND_GAUSS Compute MLEs for mixture of Gaussians given expected sufficient statistics
+% function [mu, Sigma] = Mstep_cond_gauss(w, Y, YY, YTY, varargin)
+%
+% We assume P(Y|Q=i) = N(Y; mu_i, Sigma_i)
+% and w(i,t) = p(Q(t)=i|y(t)) = posterior responsibility
+% See www.ai.mit.edu/~murphyk/Papers/learncg.pdf.
+%
+% INPUTS:
+% w(i) = sum_t w(i,t) = responsibilities for each mixture component
+% If there is only one mixture component (i.e., Q does not exist),
+% then w(i) = N = nsamples, and
+% all references to i can be replaced by 1.
+% YY(:,:,i) = sum_t w(i,t) y(:,t) y(:,t)' = weighted outer product
+% Y(:,i) = sum_t w(i,t) y(:,t) = weighted observations
+% YTY(i) = sum_t w(i,t) y(:,t)' y(:,t) = weighted inner product
+% You only need to pass in YTY if Sigma is to be estimated as spherical.
+%
+% Optional parameters may be passed as 'param_name', param_value pairs.
+% Parameter names are shown below; default values in [] - if none, argument is mandatory.
+%
+% 'cov_type' - 'full', 'diag' or 'spherical' ['full']
+% 'tied_cov' - 1 (Sigma) or 0 (Sigma_i) [0]
+% 'clamped_cov' - pass in clamped value, or [] if unclamped [ [] ]
+% 'clamped_mean' - pass in clamped value, or [] if unclamped [ [] ]
+% 'cov_prior' - Lambda_i, added to YY(:,:,i) [0.01*eye(d,d,Q)]
+%
+% If covariance is tied, Sigma has size d*d.
+% But diagonal and spherical covariances are represented in full size.
+
+[cov_type, tied_cov, clamped_cov, clamped_mean, cov_prior, other] = ...
+ process_options(varargin,...
+ 'cov_type', 'full', 'tied_cov', 0, 'clamped_cov', [], 'clamped_mean', [], ...
+ 'cov_prior', []);
+
+[Ysz Q] = size(Y);
+N = sum(w);
+if isempty(cov_prior)
+ %cov_prior = zeros(Ysz, Ysz, Q);
+ %for q=1:Q
+ % cov_prior(:,:,q) = 0.01*cov(Y(:,q)');
+ %end
+ cov_prior = repmat(0.01*eye(Ysz,Ysz), [1 1 Q]);
+end
+%YY = reshape(YY, [Ysz Ysz Q]) + cov_prior; % regularize the scatter matrix
+YY = reshape(YY, [Ysz Ysz Q]);
+
+% Set any zero weights to one before dividing
+% This is valid because w(i)=0 => Y(:,i)=0, etc
+w = w + (w==0);
+
+if ~isempty(clamped_mean)
+ mu = clamped_mean;
+else
+ % eqn 6
+ %mu = Y ./ repmat(w(:)', [Ysz 1]);% Y may have a funny size
+ mu = zeros(Ysz, Q);
+ for i=1:Q
+ mu(:,i) = Y(:,i) / w(i);
+ end
+end
+
+if ~isempty(clamped_cov)
+ Sigma = clamped_cov;
+ return;
+end
+
+if ~tied_cov
+ Sigma = zeros(Ysz,Ysz,Q);
+ for i=1:Q
+ if cov_type(1) == 's'
+ % eqn 17
+ s2 = (1/Ysz)*( (YTY(i)/w(i)) - mu(:,i)'*mu(:,i) );
+ Sigma(:,:,i) = s2 * eye(Ysz);
+ else
+ % eqn 12
+ SS = YY(:,:,i)/w(i) - mu(:,i)*mu(:,i)';
+ if cov_type(1)=='d'
+ SS = diag(diag(SS));
+ end
+ Sigma(:,:,i) = SS;
+ end
+ end
+else % tied cov
+ if cov_type(1) == 's'
+ % eqn 19
+ s2 = (1/(N*Ysz))*(sum(YTY,2) + sum(diag(mu'*mu) .* w));
+ Sigma = s2*eye(Ysz);
+ else
+ SS = zeros(Ysz, Ysz);
+ % eqn 15
+ for i=1:Q % probably could vectorize this...
+ SS = SS + YY(:,:,i)/N - mu(:,i)*mu(:,i)';
+ end
+ if cov_type(1) == 'd'
+ Sigma = diag(diag(SS));
+ else
+ Sigma = SS;
+ end
+ end
+end
+
+if tied_cov
+ Sigma = repmat(Sigma, [1 1 Q]);
+end
+Sigma = Sigma + cov_prior;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_classifier_apply.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_classifier_apply.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function [classHatTest, probPos] = mixgauss_classifier_apply(mixgauss, testFeatures)
+
+Bpos = mixgauss_prob(testFeatures, mixgauss.pos.mu, mixgauss.pos.Sigma, mixgauss.pos.prior);
+Bneg = mixgauss_prob(testFeatures, mixgauss.neg.mu, mixgauss.neg.Sigma, mixgauss.neg.prior);
+prior_pos = mixgauss.priorC(1);
+prior_neg = mixgauss.priorC(2);
+post = normalize([Bpos * prior_pos; Bneg * prior_neg], 1);
+probPos = post(1,:)';
+[junk, classHatTest] = max(post);
+classHatTest(find(classHatTest==2))=0;
+classHatTest = classHatTest(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_classifier_train.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_classifier_train.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
+function mixgauss = mixgauss_classifier_train(trainFeatures, trainLabels, nc, varargin)
+% function mixgauss = mixgauss_classifier_train(trainFeatures, trainLabels, nclusters, varargin)
+% trainFeatures(:,i) for i'th example
+% trainLabels should be 0,1
+% To evaluate performance on a tets set, use
+% mixgauss = mixgauss_classifier_train(trainFeatures, trainLabels, nc, 'testFeatures', tf, 'testLabels', tl)
+
+[testFeatures, testLabels, max_iter, thresh, cov_type, mu, Sigma, priorC, method, ...
+ cov_prior, verbose, prune_thresh] = process_options(...
+ varargin, 'testFeatures', [], 'testLabels', [], ...
+ 'max_iter', 10, 'thresh', 0.01, 'cov_type', 'diag', ...
+ 'mu', [], 'Sigma', [], 'priorC', [], 'method', 'kmeans', ...
+ 'cov_prior', [], 'verbose', 0, 'prune_thresh', 0);
+
+Nclasses = 2; % max([trainLabels testLabels]) + 1;
+
+pos = find(trainLabels == 1);
+neg = find(trainLabels == 0);
+
+if verbose, fprintf('fitting pos\n'); end
+[mixgauss.pos.mu, mixgauss.pos.Sigma, mixgauss.pos.prior] = ...
+ mixgauss_em(trainFeatures(:, pos), nc, varargin{:});
+
+if verbose, fprintf('fitting neg\n'); end
+[mixgauss.neg.mu, mixgauss.neg.Sigma, mixgauss.neg.prior] = ...
+ mixgauss_em(trainFeatures(:, neg), nc, varargin{:});
+
+
+if ~isempty(priorC)
+ mixgauss.priorC = priorC;
+else
+ mixgauss.priorC = normalize([length(pos) length(neg)]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_em.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_em.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,74 @@
+function [mu, Sigma, prior] = mixgauss_em(Y, nc, varargin)
+% MIXGAUSS_EM Fit the parameters of a mixture of Gaussians using EM
+% function [mu, Sigma, prior] = mixgauss_em(data, nc, varargin)
+%
+% data(:, t) is the t'th data point
+% nc is the number of clusters
+
+% Kevin Murphy, 13 May 2003
+
+[max_iter, thresh, cov_type, mu, Sigma, method, ...
+ cov_prior, verbose, prune_thresh] = process_options(...
+ varargin, 'max_iter', 10, 'thresh', 1e-2, 'cov_type', 'full', ...
+ 'mu', [], 'Sigma', [], 'method', 'kmeans', ...
+ 'cov_prior', [], 'verbose', 0, 'prune_thresh', 0);
+
+[ny T] = size(Y);
+
+if nc==1
+ % No latent variable, so there is a closed-form solution
+ mu = mean(Y')';
+ Sigma = cov(Y');
+ if strcmp(cov_type, 'diag')
+ Sigma = diag(diag(Sigma));
+ end
+ prior = 1;
+ return;
+end
+
+if isempty(mu)
+ [mu, Sigma, prior] = mixgauss_init(nc, Y, cov_type, method);
+end
+
+previous_loglik = -inf;
+num_iter = 1;
+converged = 0;
+
+%if verbose, fprintf('starting em\n'); end
+
+while (num_iter <= max_iter) & ~converged
+ % E step
+ probY = mixgauss_prob(Y, mu, Sigma, prior); % probY(q,t)
+ [post, lik] = normalize(probY .* repmat(prior, 1, T), 1); % post(q,t)
+ loglik = log(sum(lik));
+
+ % extract expected sufficient statistics
+ w = sum(post,2); % w(c) = sum_t post(c,t)
+ WYY = zeros(ny, ny, nc); % WYY(:,:,c) = sum_t post(c,t) Y(:,t) Y(:,t)'
+ WY = zeros(ny, nc); % WY(:,c) = sum_t post(c,t) Y(:,t)
+ WYTY = zeros(nc,1); % WYTY(c) = sum_t post(c,t) Y(:,t)' Y(:,t)
+ for c=1:nc
+ weights = repmat(post(c,:), ny, 1); % weights(:,t) = post(c,t)
+ WYbig = Y .* weights; % WYbig(:,t) = post(c,t) * Y(:,t)
+ WYY(:,:,c) = WYbig * Y';
+ WY(:,c) = sum(WYbig, 2);
+ WYTY(c) = sum(diag(WYbig' * Y));
+ end
+
+ % M step
+ prior = normalize(w);
+ [mu, Sigma] = mixgauss_Mstep(w, WY, WYY, WYTY, 'cov_type', cov_type, 'cov_prior', cov_prior);
+
+ if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end
+ num_iter = num_iter + 1;
+ converged = em_converged(loglik, previous_loglik, thresh);
+ previous_loglik = loglik;
+
+end
+
+if prune_thresh > 0
+ ndx = find(prior < prune_thresh);
+ mu(:,ndx) = [];
+ Sigma(:,:,ndx) = [];
+ prior(ndx) = [];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_init.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_init.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function [mu, Sigma, weights] = mixgauss_init(M, data, cov_type, method)
+% MIXGAUSS_INIT Initial parameter estimates for a mixture of Gaussians
+% function [mu, Sigma, weights] = mixgauss_init(M, data, cov_type. method)
+%
+% INPUTS:
+% data(:,t) is the t'th example
+% M = num. mixture components
+% cov_type = 'full', 'diag' or 'spherical'
+% method = 'rnd' (choose centers randomly from data) or 'kmeans' (needs netlab)
+%
+% OUTPUTS:
+% mu(:,k)
+% Sigma(:,:,k)
+% weights(k)
+
+if nargin < 4, method = 'kmeans'; end
+
+[d T] = size(data);
+data = reshape(data, d, T); % in case it is data(:, t, sequence_num)
+
+switch method
+ case 'rnd',
+ C = cov(data');
+ Sigma = repmat(diag(diag(C))*0.5, [1 1 M]);
+ % Initialize each mean to a random data point
+ indices = randperm(T);
+ mu = data(:,indices(1:M));
+ weights = normalise(ones(M,1));
+ case 'kmeans',
+ mix = gmm(d, M, cov_type);
+ options = foptions;
+ max_iter = 5;
+ options(1) = -1; % be quiet!
+ options(14) = max_iter;
+ mix = gmminit(mix, data', options);
+ mu = reshape(mix.centres', [d M]);
+ weights = mix.priors(:);
+ for m=1:M
+ switch cov_type
+ case 'diag',
+ Sigma(:,:,m) = diag(mix.covars(m,:));
+ case 'full',
+ Sigma(:,:,m) = mix.covars(:,:,m);
+ case 'spherical',
+ Sigma(:,:,m) = mix.covars(m) * eye(d);
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,133 @@
+function [B, B2] = mixgauss_prob(data, mu, Sigma, mixmat, unit_norm)
+% EVAL_PDF_COND_MOG Evaluate the pdf of a conditional mixture of Gaussians
+% function [B, B2] = eval_pdf_cond_mog(data, mu, Sigma, mixmat, unit_norm)
+%
+% Notation: Y is observation, M is mixture component, and both may be conditioned on Q.
+% If Q does not exist, ignore references to Q=j below.
+% Alternatively, you may ignore M if this is a conditional Gaussian.
+%
+% INPUTS:
+% data(:,t) = t'th observation vector
+%
+% mu(:,k) = E[Y(t) | M(t)=k]
+% or mu(:,j,k) = E[Y(t) | Q(t)=j, M(t)=k]
+%
+% Sigma(:,:,j,k) = Cov[Y(t) | Q(t)=j, M(t)=k]
+% or there are various faster, special cases:
+% Sigma() - scalar, spherical covariance independent of M,Q.
+% Sigma(:,:) diag or full, tied params independent of M,Q.
+% Sigma(:,:,j) tied params independent of M.
+%
+% mixmat(k) = Pr(M(t)=k) = prior
+% or mixmat(j,k) = Pr(M(t)=k | Q(t)=j)
+% Not needed if M is not defined.
+%
+% unit_norm - optional; if 1, means data(:,i) AND mu(:,i) each have unit norm (slightly faster)
+%
+% OUTPUT:
+% B(t) = Pr(y(t))
+% or
+% B(i,t) = Pr(y(t) | Q(t)=i)
+% B2(i,k,t) = Pr(y(t) | Q(t)=i, M(t)=k)
+%
+% If the number of mixture components differs depending on Q, just set the trailing
+% entries of mixmat to 0, e.g., 2 components if Q=1, 3 components if Q=2,
+% then set mixmat(1,3)=0. In this case, B2(1,3,:)=1.0.
+
+
+
+
+if isvectorBNT(mu) & size(mu,2)==1
+ d = length(mu);
+ Q = 1; M = 1;
+elseif ndims(mu)==2
+ [d Q] = size(mu);
+ M = 1;
+else
+ [d Q M] = size(mu);
+end
+[d T] = size(data);
+
+if nargin < 4, mixmat = ones(Q,1); end
+if nargin < 5, unit_norm = 0; end
+
+%B2 = zeros(Q,M,T); % ATB: not needed allways
+%B = zeros(Q,T);
+
+if isscalarBNT(Sigma)
+ mu = reshape(mu, [d Q*M]);
+ if unit_norm % (p-q)'(p-q) = p'p + q'q - 2p'q = n+m -2p'q since p(:,i)'p(:,i)=1
+ %avoid an expensive repmat
+ disp('unit norm')
+ %tic; D = 2 -2*(data'*mu)'; toc
+ D = 2 - 2*(mu'*data);
+ tic; D2 = sqdist(data, mu)'; toc
+ assert(approxeq(D,D2))
+ else
+ D = sqdist(data, mu)';
+ end
+ clear mu data % ATB: clear big old data
+ % D(qm,t) = sq dist between data(:,t) and mu(:,qm)
+ logB2 = -(d/2)*log(2*pi*Sigma) - (1/(2*Sigma))*D; % det(sigma*I) = sigma^d
+ B2 = reshape(exp(logB2), [Q M T]);
+ clear logB2 % ATB: clear big old data
+
+elseif ndims(Sigma)==2 % tied full
+ mu = reshape(mu, [d Q*M]);
+ D = sqdist(data, mu, inv(Sigma))';
+ % D(qm,t) = sq dist between data(:,t) and mu(:,qm)
+ logB2 = -(d/2)*log(2*pi) - 0.5*logdet(Sigma) - 0.5*D;
+ %denom = sqrt(det(2*pi*Sigma));
+ %numer = exp(-0.5 * D);
+ %B2 = numer/denom;
+ B2 = reshape(exp(logB2), [Q M T]);
+
+elseif ndims(Sigma)==3 % tied across M
+ B2 = zeros(Q,M,T);
+ for j=1:Q
+ % D(m,t) = sq dist between data(:,t) and mu(:,j,m)
+ if isposdef(Sigma(:,:,j))
+ D = sqdist(data, permute(mu(:,j,:), [1 3 2]), inv(Sigma(:,:,j)))';
+ logB2 = -(d/2)*log(2*pi) - 0.5*logdet(Sigma(:,:,j)) - 0.5*D;
+ B2(j,:,:) = exp(logB2);
+ else
+ error(sprintf('mixgauss_prob: Sigma(:,:,q=%d) not psd\n', j));
+ end
+ end
+
+else % general case
+ B2 = zeros(Q,M,T);
+ for j=1:Q
+ for k=1:M
+ %if mixmat(j,k) > 0
+ B2(j,k,:) = gaussian_prob(data, mu(:,j,k), Sigma(:,:,j,k));
+ %end
+ end
+ end
+end
+
+% B(j,t) = sum_k B2(j,k,t) * Pr(M(t)=k | Q(t)=j)
+
+% The repmat is actually slower than the for-loop, because it uses too much memory
+% (this is true even for small T).
+
+%B = squeeze(sum(B2 .* repmat(mixmat, [1 1 T]), 2));
+%B = reshape(B, [Q T]); % undo effect of squeeze in case Q = 1
+
+B = zeros(Q,T);
+if Q < T
+ for q=1:Q
+ %B(q,:) = mixmat(q,:) * squeeze(B2(q,:,:)); % squeeze changes order if M=1
+ B(q,:) = mixmat(q,:) * permute(B2(q,:,:), [2 3 1]); % vector * matrix sums over m
+ end
+else
+ for t=1:T
+ B(:,t) = sum(mixmat .* B2(:,:,t), 2); % sum over m
+ end
+end
+%t=toc;fprintf('%5.3f\n', t)
+
+%tic
+%A = squeeze(sum(B2 .* repmat(mixmat, [1 1 T]), 2));
+%t=toc;fprintf('%5.3f\n', t)
+%assert(approxeq(A,B)) % may be false because of round off error
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_prob_test.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_prob_test.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,111 @@
+function test_eval_pdf_cond_mixgauss()
+
+%Q = 10; M = 100; d = 20; T = 500;
+Q = 2; M = 3; d = 4; T = 5;
+
+mu = rand(d,Q,M);
+data = randn(d,T);
+%mixmat = mk_stochastic(rand(Q,M));
+mixmat = mk_stochastic(ones(Q,M));
+
+% tied scalar
+Sigma = 0.01;
+
+mu = rand(d,M,Q);
+weights = mixmat';
+N = M*ones(1,Q);
+tic; [B, B2, D] = parzen(data, mu, Sigma, N, weights); toc
+tic; [BC, B2C, DC] = parzenC(data, mu, Sigma, N); toc
+approxeq(B,BC)
+B2C = reshape(B2C,[M Q T]);
+approxeq(B2,B2C)
+DC = reshape(DC,[M Q T]);
+approxeq(D,DC)
+
+
+return
+
+tic; [B, B2] = eval_pdf_cond_mixgauss(data, mu, Sigma, mixmat); toc
+tic; C = eval_pdf_cond_parzen(data, mu, Sigma); toc
+approxeq(B,C)
+
+return;
+
+
+mu = reshape(mu, [d Q*M]);
+
+data = mk_unit_norm(data);
+mu = mk_unit_norm(mu);
+tic; D = 2 -2*(data'*mu); toc % avoid an expensive repmat
+tic; D2 = sqdist(data, mu); toc
+approxeq(D,D2)
+
+
+% D(t,m) = sq dist between data(:,t) and mu(:,m)
+mu = reshape(mu, [d Q*M]);
+D = dist2(data', mu');
+%denom = (2*pi)^(d/2)*sqrt(abs(det(C)));
+denom = (2*pi*Sigma)^(d/2); % sqrt(det(2*pi*Sigma))
+numer = exp(-0.5/Sigma * D');
+B2 = numer / denom;
+B2 = reshape(B2, [Q M T]);
+
+tic; B = squeeze(sum(B2 .* repmat(mixmat, [1 1 T]), 2)); toc
+
+tic
+A = zeros(Q,T);
+for q=1:Q
+ A(q,:) = mixmat(q,:) * squeeze(B2(q,:,:)); % sum over m
+end
+toc
+assert(approxeq(A,B))
+
+tic
+A = zeros(Q,T);
+for t=1:T
+ A(:,t) = sum(mixmat .* B2(:,:,t), 2); % sum over m
+end
+toc
+assert(approxeq(A,B))
+
+
+
+
+mu = reshape(mu, [d Q M]);
+B3 = zeros(Q,M,T);
+for j=1:Q
+ for k=1:M
+ B3(j,k,:) = gaussian_prob(data, mu(:,j,k), Sigma*eye(d));
+ end
+end
+assert(approxeq(B2, B3))
+
+logB4 = -(d/2)*log(2*pi*Sigma) - (1/(2*Sigma))*D; % det(sigma*I) = sigma^d
+B4 = reshape(exp(logB4), [Q M T]);
+assert(approxeq(B4, B3))
+
+
+
+
+% tied cov matrix
+
+Sigma = rand_psd(d,d);
+mu = reshape(mu, [d Q*M]);
+D = sqdist(data, mu, inv(Sigma))';
+denom = sqrt(det(2*pi*Sigma));
+numer = exp(-0.5 * D);
+B2 = numer / denom;
+B2 = reshape(B2, [Q M T]);
+
+mu = reshape(mu, [d Q M]);
+B3 = zeros(Q,M,T);
+for j=1:Q
+ for k=1:M
+ B3(j,k,:) = gaussian_prob(data, mu(:,j,k), Sigma);
+ end
+end
+assert(approxeq(B2, B3))
+
+logB4 = -(d/2)*log(2*pi) - 0.5*logdet(Sigma) - 0.5*D;
+B4 = reshape(exp(logB4), [Q M T]);
+assert(approxeq(B4, B3))
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mixgauss_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [data, indices] = mixgauss_sample(mu, Sigma, mixweights, Nsamples)
+% mixgauss_sample Sample data from a mixture of Gaussians
+% function [data, indices] = mixgauss_sample(mu, Sigma, mixweights, Nsamples)
+%
+% Model is P(X) = sum_k mixweights(k) N(X; mu(:,k), Sigma(:,:,k)) or Sigma(k) for scalar
+% data(:,i) is the i'th sample from P(X)
+% indices(i) is the component from which sample i was drawn
+
+[D K] = size(mu);
+data = zeros(D, Nsamples);
+indices = sample_discrete(mixweights, 1, Nsamples);
+for k=1:K
+ if ndims(Sigma) < 3
+ sig = Sigma(k);
+ else
+ sig = Sigma(:,:,k);
+ end
+ ndx = find(indices==k);
+ if length(ndx) > 0
+ data(:,ndx) = sample_gaussian(mu(:,k), sig, length(ndx))';
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mkPolyFvec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mkPolyFvec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function p = mkPolyFvec(x)
+% MKPOLYFVEC Make feature vector by constructing 2nd order polynomial from input data
+% function p = mkPolyFvec(x)
+%
+% x(:,i) for example i
+% p(:,i) = [x(1,i) x(2,i) x(3,i) x(1,i)^2 x(2,i)^2 x(3,i)^2 ..
+% x(1,i)*x(2,i) x(1,i)*x(3,i) x(2,i)*x(3,i)]'
+%
+% Example
+% x = [4 5 6]'
+% p = [4 5 6 16 25 36 20 24 30]'
+
+fvec = x;
+fvecSq = x.*x;
+[D N] = size(x);
+fvecCross = zeros(D*(D-1)/2, N);
+i = 1;
+for d=1:D
+ for d2=d+1:D
+ fvecCross(i,:) = x(d,:) .* x(d2,:);
+ i = i + 1;
+ end
+end
+p = [fvec; fvecSq; fvecCross];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/mk_unit_norm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/mk_unit_norm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function B = mk_unit_norm(A)
+% MK_UNIT_NORM Make each column be a unit norm vector
+% function B = mk_unit_norm(A)
+%
+% We divide each column by its magnitude
+
+
+[nrows ncols] = size(A);
+s = sum(A.^2);
+ndx = find(s==0);
+s(ndx)=1;
+B = A ./ repmat(sqrt(s), [nrows 1]);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/multinomial_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/multinomial_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function B = eval_pdf_cond_multinomial(data, obsmat)
+% EVAL_PDF_COND_MULTINOMIAL Evaluate pdf of conditional multinomial
+% function B = eval_pdf_cond_multinomial(data, obsmat)
+%
+% Notation: Y = observation (O values), Q = conditioning variable (K values)
+%
+% Inputs:
+% data(t) = t'th observation - must be an integer in {1,2,...,K}: cannot be 0!
+% obsmat(i,o) = Pr(Y(t)=o | Q(t)=i)
+%
+% Output:
+% B(i,t) = Pr(y(t) | Q(t)=i)
+
+[Q O] = size(obsmat);
+T = prod(size(data)); % length(data);
+B = zeros(Q,T);
+
+for t=1:T
+ B(:,t) = obsmat(:, data(t));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/multinomial_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/multinomial_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function Y = sample_cond_multinomial(X, M)
+% SAMPLE_MULTINOMIAL Sample Y(i) ~ M(X(i), :)
+% function Y = sample_multinomial(X, M)
+%
+% X(i) = i'th sample
+% M(i,j) = P(Y=j | X=i) = noisy channel model
+%
+% e.g., if X is a binary image,
+% Y = sample_multinomial(softeye(2, 0.9), X)
+% will create a noisy version of X, where bits are flipped with probability 0.1
+
+if any(X(:)==0)
+ error('data must only contain positive integers')
+end
+
+Y = zeros(size(X));
+for i=min(X(:)):max(X(:))
+ ndx = find(X==i);
+ Y(ndx) = sample_discrete(M(i,:), length(ndx), 1);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/multipdf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/multipdf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function p = multipdf(x,theta)
+%MULTIPDF Multinomial probability density function.
+% p = multipdf(x,theta) returns the probabilities of
+% vector x, under the multinomial distribution
+% with parameter vector theta.
+%
+% Author: David Ross
+
+%--------------------------------------------------------
+% Check the arguments.
+%--------------------------------------------------------
+error(nargchk(2,2,nargin));
+
+% make sure theta is a vector
+if ndims(theta) > 2 | all(size(theta) > 1)
+ error('theta must be a vector');
+end
+
+% make sure x is of the appropriate size
+if ndims(x) > 2 | any(size(x) ~= size(theta))
+ error('columns of X must have same length as theta');
+end
+
+
+%--------------------------------------------------------
+% Main...
+%--------------------------------------------------------
+p = prod(theta .^ x);
+p = p .* factorial(sum(x)) ./ prod(factorial_v(x));
+
+
+%--------------------------------------------------------
+% Function factorial_v(x): computes the factorial function
+% on each element of x
+%--------------------------------------------------------
+function r = factorial_v(x)
+
+if size(x,2) == 1
+ x = x';
+end
+
+r = [];
+for y = x
+ r = [r factorial(y)];
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/multirnd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/multirnd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function r = multirnd(theta,k)
+%MULTIRND - Random vector from multinomial distribution.
+% r = multirnd(theta,k) returns a vector randomly selected
+% from the multinomial distribution with parameter vector
+% theta, and count k (i.e. sum(r) = k).
+%
+% Note: if k is unspecified, then it is assumed k=1.
+%
+% Author: David Ross
+%
+
+%--------------------------------------------------------
+% Check the arguments.
+%--------------------------------------------------------
+error(nargchk(1,2,nargin));
+
+% make sure theta is a vector
+if ndims(theta) > 2 | all(size(theta) > 1)
+ error('theta must be a vector');
+end
+
+% if theta is a row vector, convert it to a column vector
+if size(theta,1) == 1
+ theta = theta';
+end
+
+% make sure k is a scalar?
+
+% if the number of samples has not been provided, set
+% it to one
+if nargin == 1
+ k = 1;
+end
+
+
+%--------------------------------------------------------
+% Main...
+%--------------------------------------------------------
+n = length(theta);
+theta_cdf = cumsum(theta);
+
+r = zeros(n,1);
+random_vals = rand(k,1);
+
+for j = 1:k
+ index = min(find(random_vals(j) <= theta_cdf));
+ r(index) = r(index) + 1;
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/normal_coef.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/normal_coef.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function c = normal_coef (Sigma)
+% NORMAL_COEF Compute the normalizing coefficient for a multivariate gaussian.
+% c = normal_coef (Sigma)
+
+n = length(Sigma);
+c = (2*pi)^(-n/2) * det(Sigma)^(-0.5);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/partial_corr_coef.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/partial_corr_coef.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function [r, c] = partial_corr_coef(S, i, j, Y)
+% PARTIAL_CORR_COEF Compute a partial correlation coefficient
+% [r, c] = partial_corr_coef(S, i, j, Y)
+%
+% S is the covariance (or correlation) matrix for X, Y, Z
+% where X=[i j], Y is conditioned on, and Z is marginalized out.
+% Let S2 = Cov[X | Y] be the partial covariance matrix.
+% Then c = S2(i,j) and r = c / sqrt( S2(i,i) * S2(j,j) )
+%
+
+% Example: Anderson (1984) p129
+% S = [1.0 0.8 -0.4;
+% 0.8 1.0 -0.56;
+% -0.4 -0.56 1.0];
+% r(1,3 | 2) = 0.0966
+%
+% Example: Van de Geer (1971) p111
+%S = [1 0.453 0.322;
+% 0.453 1.0 0.596;
+% 0.322 0.596 1];
+% r(2,3 | 1) = 0.533
+
+X = [i j];
+i2 = 1; % find_equiv_posns(i, X);
+j2 = 2; % find_equiv_posns(j, X);
+S2 = S(X,X) - S(X,Y)*inv(S(Y,Y))*S(Y,X);
+c = S2(i2,j2);
+r = c / sqrt(S2(i2,i2) * S2(j2,j2));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/parzen.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/parzen.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+function [B,B2,dist] = parzen(data, mu, Sigma, N)
+% EVAL_PDF_COND_PARZEN Evaluate the pdf of a conditional Parzen window
+% function B = eval_pdf_cond_parzen(data, mu, Sigma, N)
+%
+% B(q,t) = Pr(data(:,t) | Q=q) = sum_{m=1}^{N(q)} w(m,q)*K(data(:,t) - mu(:,m,q); sigma)
+% where K() is a Gaussian kernel with spherical variance sigma,
+% and w(m,q) = 1/N(q) if m<=N(q) and = 0 otherwise
+% where N(q) is the number of mixture components for q
+%
+% B2(m,q,t) = K(data(:,t) - mu(:,m,q); sigma) for m=1:max(N)
+
+% This is like eval_pdf_cond_parzen, except mu is mu(:,m,q) instead of mu(:,q,m)
+% and we use 1/N(q) instead of mixmat(q,m)
+
+if nargout >= 2
+ keep_B2 = 1;
+else
+ keep_B2 = 0;
+end
+
+if nargout >= 3
+ keep_dist = 1;
+else
+ keep_dist = 0;
+end
+
+[d M Q] = size(mu);
+[d T] = size(data);
+
+M = max(N(:));
+
+B = zeros(Q,T);
+const1 = (2*pi*Sigma)^(-d/2);
+const2 = -(1/(2*Sigma));
+if T*Q*M>20000000 % not enough memory to call sqdist
+ disp('eval parzen for loop')
+ if keep_dist,
+ dist = zeros(M,Q,T);
+ end
+ if keep_B2
+ B2 = zeros(M,Q,T);
+ end
+ for q=1:Q
+ D = sqdist(mu(:,1:N(q),q), data); % D(m,t)
+ if keep_dist
+ dist(:,q,:) = D;
+ end
+ tmp = const1 * exp(const2*D);
+ if keep_B2,
+ B2(:,q,:) = tmp;
+ end
+ if N(q) > 0
+ %B(q,:) = (1/N(q)) * const1 * sum(exp(const2*D), 2);
+ B(q,:) = (1/N(q)) * sum(tmp,1);
+ end
+ end
+else
+ %disp('eval parzen vectorized')
+ dist = sqdist(reshape(mu(:,1:M,:), [d M*Q]), data); % D(mq,t)
+ dist = reshape(dist, [M Q T]);
+ B2 = const1 * exp(const2*dist); % B2(m,q,t)
+ if ~keep_dist
+ clear dist
+ end
+
+ % weights(m,q) is the weight of mixture component m for q
+ % = 1/N(q) if m<=N(q) and = 0 otherwise
+ % e.g., N = [2 3 1], M = 3,
+ % weights = [1/2 1/3 1 = 1/2 1/3 1/1 2 3 1 1 1 1
+ % 1/2 1/3 0 1/2 1/3 1/1 .* 2 3 1 <= 2 2 2
+ % 0 1/3 0] 1/2 1/3 1/1 2 3 1 3 3 3
+
+ Ns = repmat(N(:)', [M 1]);
+ ramp = 1:M;
+ ramp = repmat(ramp(:), [1 Q]);
+ n = N + (N==0); % avoid 1/0 by replacing with 0* 1/1m where 0 comes from mask
+ N1 = repmat(1 ./ n(:)', [M 1]);
+ mask = (ramp <= Ns);
+ weights = N1 .* mask;
+ B2 = B2 .* repmat(mask, [1 1 T]);
+
+ % B(q,t) = sum_m B2(m,q,t) * P(m|q) = sum_m B2(m,q,t) * weights(m,q)
+ B = squeeze(sum(B2 .* repmat(weights, [1 1 T]), 1));
+ B = reshape(B, [Q T]); % undo effect of squeeze in case Q = 1
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/parzenC.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/parzenC.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,116 @@
+/* C mex version of parzen.m
+[B,B2] = parzen(feat, mu, Sigma, Nproto);
+*/
+#include "mex.h"
+#include <math.h>
+#include <stdio.h>
+
+#define PI 3.141592654
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
+ int D, M, Q, T, d, m, q, t;
+ double *data, *mu, *SigmaPtr, *N, Sigma;
+ double *B, *dist, *B2, tmp;
+ const int* dim_mu;
+ double const1, const2, sum_m, sum_d, diff;
+ int Dt, DMq, Dm, MQt, Mq;
+ int dims_B2[3];
+
+ int ndim_mu, i, save_B2;
+
+ data = mxGetPr(prhs[0]);
+ mu = mxGetPr(prhs[1]);
+ SigmaPtr = mxGetPr(prhs[2]);
+ Sigma = *SigmaPtr;
+ N = mxGetPr(prhs[3]);
+
+ D = mxGetM(prhs[0]);
+ T = mxGetN(prhs[0]);
+
+ ndim_mu = mxGetNumberOfDimensions(prhs[1]);
+ dim_mu = mxGetDimensions(prhs[1]);
+ D = dim_mu[0];
+ M = dim_mu[1];
+ /* printf("parzenC: nlhs=%d, D=%d, M=%d, T=%d\n", nlhs, D, M, T); */
+
+ /* If mu is mu(d,m,o,p), then [d M Q] = size(mu) in matlab sets Q=o*p,
+ i.e., the size of all conditioning variables */
+ Q = 1;
+ for (i = 2; i < ndim_mu; i++) {
+ /* printf("dim_mu[%d]=%d\n", i, dim_mu[i]); */
+ Q = Q*dim_mu[i];
+ }
+
+ /* M = max(N) */
+ M = -1000000;
+ for (i=0; i < Q; i++) {
+ /* printf("N[%d]=%d\n", i, (int) N[i]); */
+ if (N[i] > M) {
+ M = (int) N[i];
+ }
+ }
+
+ /* printf("parzenC: nlhs=%d, D=%d, Q=%d, M=%d, T=%d\n", nlhs, D, Q, M, T); */
+
+ plhs[0] = mxCreateDoubleMatrix(Q,T, mxREAL);
+ B = mxGetPr(plhs[0]);
+
+ if (nlhs >= 2)
+ save_B2 = 1;
+ else
+ save_B2 = 0;
+
+ if (save_B2) {
+ /* printf("parzenC saving B2\n"); */
+ /*plhs[1] = mxCreateDoubleMatrix(M*Q*T,1, mxREAL);*/
+ dims_B2[0] = M;
+ dims_B2[1] = Q;
+ dims_B2[2] = T;
+ plhs[1] = mxCreateNumericArray(3, dims_B2, mxDOUBLE_CLASS, mxREAL);
+ B2 = mxGetPr(plhs[1]);
+ } else {
+ /* printf("parzenC not saving B2\n"); */
+ }
+ /*
+ plhs[2] = mxCreateDoubleMatrix(M*Q*T,1, mxREAL);
+ dist = mxGetPr(plhs[2]);
+ */
+ const1 = pow(2*PI*Sigma, -D/2.0);
+ const2 = -(1/(2*Sigma));
+
+ for (t=0; t < T; t++) {
+ /* printf("t=%d!\n",t); */
+ Dt = D*t;
+ MQt = M*Q*t;
+ for (q=0; q < Q; q++) {
+ sum_m = 0;
+ DMq = D*M*q;
+ Mq = M*q;
+
+ for (m=0; m < (int)N[q]; m++) {
+ sum_d = 0;
+ Dm = D*m;
+ for (d=0; d < D; d++) {
+ /* diff = data(d,t) - mu(d,m,q) */
+ /*diff = data[d + D*t] - mu[d + D*m + D*M*q]; */
+ diff = data[d + Dt] - mu[d + Dm + DMq];
+ sum_d = sum_d + diff*diff;
+ }
+ /* dist[m,q,t] = dist[m + M*q + M*Q*t] = dist[m + Mq + MQt] = sum_d */
+ tmp = const1 * exp(const2*sum_d);
+ sum_m = sum_m + tmp;
+ if (save_B2)
+ B2[m + Mq + MQt] = tmp;
+ }
+
+ if (N[q]>0) {
+ B[q + Q*t] = (1.0/N[q]) * sum_m;
+ } else {
+ B[q + Q*t] = 0.0;
+ }
+ }
+ }
+}
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/parzenC.dll
Binary file toolboxes/FullBNT-1.0.7/KPMstats/parzenC.dll has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/parzenC_test.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/parzenC_test.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+d = 2; M = 3; Q = 4; T = 5; Sigma = 10;
+N = sample_discrete(normalize(ones(1,M)), 1, Q);
+data = randn(d,T);
+mu = randn(d,M,Q);
+
+[BM, B2M] = parzen(data, mu, Sigma, N);
+[B, B2] = parzenC(data, mu, Sigma, N);
+
+approxeq(B,BM)
+approxeq(B2,B2M)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/parzen_fit_select_unif.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/parzen_fit_select_unif.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function [mu, N, pick] = parzen_fit_select_unif(data, labels, max_proto, varargin)
+% PARZEN_FIT_SELECT_UNIF Fit a parzen density estimator by selecting prototypes uniformly from data
+% [mu, N, pick] = parzen_fit_select_unif(data, labels, max_proto, ...)
+%
+% We partition the data into different subsets based on the labels.
+% We then choose up to max_proto columns from each subset, chosen uniformly.
+%
+% INPUTS
+% data(:,t)
+% labels(t) - should be in {1,2,..,Q}
+% max_proto - max number of prototypes per partition
+%
+% Optional args
+% partition_names{m} - for debugging
+% boundary - do not choose prototypes which are within 'boundary' of the label transition
+%
+% OUTPUTS
+% mu(:, m, q) for label q, prototype m for 1 <= m <= N(q)
+% N(q) = number of prototypes for label q
+% pick{q} = identity of the prototypes
+
+nclasses = max(labels);
+[boundary, partition_names] = process_options(...
+ varargin, 'boundary', 0, 'partition_names', []);
+
+[D T] = size(data);
+mu = zeros(D, 1, nclasses); % dynamically determine num prototypes (may be less than K)
+mean_feat = mean(data,2);
+pick = cell(1,nclasses);
+for c=1:nclasses
+ ndx = find(labels==c);
+ if isempty(ndx)
+ %fprintf('no training images have label %d (%s)\n', c, partition_names{c})
+ fprintf('no training images have label %d\n', c);
+ nviews = 1;
+ mu(:,1,c) = mean_feat;
+ else
+ foo = linspace(boundary+1, length(ndx)-boundary, max_proto); % fixed paren: was length(ndx-boundary), which shifted indices instead of shrinking the count
+ pick{c} = ndx(unique(floor(foo)));
+ nviews = length(pick{c});
+ %fprintf('picking %d views for class %d=%s\n', nviews, c, class_names{c});
+ mu(:,1:nviews,c) = data(:, pick{c});
+ end
+ N(c) = nviews;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/pca.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/pca.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+function [PCcoeff, PCvec] = pca(data, N)
+%PCA Principal Components Analysis
+%
+% Description
+% PCCOEFF = PCA(DATA) computes the eigenvalues of the covariance
+% matrix of the dataset DATA and returns them as PCCOEFF. These
+% coefficients give the variance of DATA along the corresponding
+% principal components.
+%
+% PCCOEFF = PCA(DATA, N) returns the largest N eigenvalues.
+%
+% [PCCOEFF, PCVEC] = PCA(DATA) returns the principal components as well
+% as the coefficients. This is considerably more computationally
+% demanding than just computing the eigenvalues.
+%
+% See also
+% EIGDEC, GTMINIT, PPCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin == 1
+ N = size(data, 2);
+end
+
+if nargout == 1
+ evals_only = logical(1);
+else
+ evals_only = logical(0);
+end
+
+if N ~= round(N) | N < 1 | N > size(data, 2)
+ error('Number of PCs must be integer, >0, < dim');
+end
+
+% Find the sorted eigenvalues of the data covariance matrix
+if evals_only
+ PCcoeff = eigdec(cov(data), N);
+else
+ [PCcoeff, PCvec] = eigdec(cov(data), N);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/rndcheck.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/rndcheck.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,294 @@
+function [errorcode, rows, columns] = rndcheck(nargs,nparms,arg1,arg2,arg3,arg4,arg5)
+%RNDCHECK error checks the argument list for the random number generators.
+
+% B.A. Jones 1-22-93
+% Copyright (c) 1993-98 by The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:29:22 $
+
+sizeinfo = nargs - nparms;
+errorcode = 0;
+
+if nparms == 3
+ [r1 c1] = size(arg1);
+ [r2 c2] = size(arg2);
+ [r3 c3] = size(arg3);
+end
+
+if nparms == 2
+ [r1 c1] = size(arg1);
+ [r2 c2] = size(arg2);
+end
+
+if sizeinfo == 0
+ if nparms == 1
+ [rows columns] = size(arg1);
+ end
+
+ if nparms == 2
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+ if ~scalararg1
+ [rows columns] = size(arg1);
+ elseif ~scalararg2
+ [rows columns] = size(arg2);
+ else
+ [rows columns] = size(arg1);
+ end
+ end
+
+ if nparms == 3
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ scalararg3 = (prod(size(arg3)) == 1);
+
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1 & ~scalararg3
+ if r1 ~= r3 | c1 ~= c3
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg3 & ~scalararg2
+ if r3 ~= r2 | c3 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+ if ~scalararg1
+ [rows columns] = size(arg1);
+ elseif ~scalararg2
+ [rows columns] = size(arg2);
+ else
+ [rows columns] = size(arg3);
+ end
+ end
+end
+
+if sizeinfo == 1
+ scalararg1 = (prod(size(arg1)) == 1);
+ if nparms == 1
+ if prod(size(arg2)) ~= 2
+ errorcode = 2;
+ return;
+ end
+ if ~scalararg1 & arg2 ~= size(arg1)
+ errorcode = 3;
+ return;
+ end
+ if (arg2(1) < 0 | arg2(2) < 0 | arg2(1) ~= round(arg2(1)) | arg2(2) ~= round(arg2(2))),
+ errorcode = 4;
+ return;
+ end
+ rows = arg2(1);
+ columns = arg2(2);
+ end
+
+ if nparms == 2
+ if prod(size(arg3)) ~= 2
+ errorcode = 2;
+ return;
+ end
+ scalararg2 = (prod(size(arg2)) == 1);
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+ if (arg3(1) < 0 | arg3(2) < 0 | arg3(1) ~= round(arg3(1)) | arg3(2) ~= round(arg3(2))),
+ errorcode = 4;
+ return;
+ end
+ if ~scalararg1
+ if any(arg3 ~= size(arg1))
+ errorcode = 3;
+ return;
+ end
+ [rows columns] = size(arg1);
+ elseif ~scalararg2
+ if any(arg3 ~= size(arg2))
+ errorcode = 3;
+ return;
+ end
+ [rows columns] = size(arg2);
+ else
+ rows = arg3(1);
+ columns = arg3(2);
+ end
+ end
+
+ if nparms == 3
+ if prod(size(arg4)) ~= 2
+ errorcode = 2;
+ return;
+ end
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ scalararg3 = (prod(size(arg3)) == 1);
+
+ if (arg4(1) < 0 | arg4(2) < 0 | arg4(1) ~= round(arg4(1)) | arg4(2) ~= round(arg4(2))),
+ errorcode = 4;
+ return;
+ end
+
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1 & ~scalararg3
+ if r1 ~= r3 | c1 ~= c3
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg3 & ~scalararg2
+ if r3 ~= r2 | c3 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+ if ~scalararg1
+ if any(arg4 ~= size(arg1))
+ errorcode = 3;
+ return;
+ end
+ [rows columns] = size(arg1);
+ elseif ~scalararg2
+ if any(arg4 ~= size(arg2))
+ errorcode = 3;
+ return;
+ end
+ [rows columns] = size(arg2);
+ elseif ~scalararg3
+ if any(arg4 ~= size(arg3))
+ errorcode = 3;
+ return;
+ end
+ [rows columns] = size(arg3);
+ else
+ rows = arg4(1);
+ columns = arg4(2);
+ end
+ end
+end
+
+if sizeinfo == 2
+ if nparms == 1
+ scalararg1 = (prod(size(arg1)) == 1);
+ if ~scalararg1
+ [rows columns] = size(arg1);
+ if rows ~= arg2 | columns ~= arg3
+ errorcode = 3;
+ return;
+ end
+ end
+ if (arg2 < 0 | arg3 < 0 | arg2 ~= round(arg2) | arg3 ~= round(arg3)),
+ errorcode = 4;
+ return;
+ end
+ rows = arg2;
+ columns = arg3;
+ end
+
+ if nparms == 2
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+ if ~scalararg1
+ [rows columns] = size(arg1);
+ if rows ~= arg3 | columns ~= arg4
+ errorcode = 3;
+ return;
+ end
+ elseif ~scalararg2
+ [rows columns] = size(arg2);
+ if rows ~= arg3 | columns ~= arg4
+ errorcode = 3;
+ return;
+ end
+ else
+ if (arg3 < 0 | arg4 < 0 | arg3 ~= round(arg3) | arg4 ~= round(arg4)),
+ errorcode = 4;
+ return;
+ end
+ rows = arg3;
+ columns = arg4;
+ end
+ end
+
+ if nparms == 3
+ scalararg1 = (prod(size(arg1)) == 1);
+ scalararg2 = (prod(size(arg2)) == 1);
+ scalararg3 = (prod(size(arg3)) == 1);
+
+ if ~scalararg1 & ~scalararg2
+ if r1 ~= r2 | c1 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1 & ~scalararg3
+ if r1 ~= r3 | c1 ~= c3
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg3 & ~scalararg2
+ if r3 ~= r2 | c3 ~= c2
+ errorcode = 1;
+ return;
+ end
+ end
+
+ if ~scalararg1
+ [rows columns] = size(arg1);
+ if rows ~= arg4 | columns ~= arg5
+ errorcode = 3;
+ return;
+ end
+ elseif ~scalararg2
+ [rows columns] = size(arg2);
+ if rows ~= arg4 | columns ~= arg5
+ errorcode = 3;
+ return;
+ end
+ elseif ~scalararg3
+ [rows columns] = size(arg3);
+ if rows ~= arg4 | columns ~= arg5
+ errorcode = 3;
+ return;
+ end
+ else
+ if (arg4 < 0 | arg5 < 0 | arg4 ~= round(arg4) | arg5 ~= round(arg5)),
+ errorcode = 4;
+ return;
+ end
+ rows = arg4;
+ columns = arg5;
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function x = sample(p, n)
+% SAMPLE Sample from categorical distribution.
+% Returns a row vector of integers, sampled according to the probability
+% distribution p.
+% Uses the stick-breaking algorithm.
+% Much faster algorithms are also possible.
+
+if nargin < 2
+ n = 1;
+end
+
+cdf = cumsum(p(:));
+for i = 1:n
+ x(i) = sum(cdf < rand) + 1;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/sample_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/sample_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function M = sample_discrete(prob, r, c)
+% SAMPLE_DISCRETE Like the built in 'rand', except we draw from a non-uniform discrete distrib.
+% M = sample_discrete(prob, r, c)
+%
+% Example: sample_discrete([0.8 0.2], 1, 10) generates a row vector of 10 random integers from {1,2},
+% where the prob. of being 1 is 0.8 and the prob of being 2 is 0.2.
+
+n = length(prob);
+
+if nargin == 1
+ r = 1; c = 1;
+elseif nargin == 2
+ c = r; % fixed: was 'c == r', a no-op comparison that left c undefined
+end
+
+R = rand(r, c);
+M = ones(r, c);
+cumprob = cumsum(prob(:));
+
+if n < r*c
+ for i = 1:n-1
+ M = M + (R > cumprob(i));
+ end
+else
+ % loop over the smaller index - can be much faster if length(prob) >> r*c
+ cumprob2 = cumprob(1:end-1);
+ for i=1:r
+ for j=1:c
+ M(i,j) = sum(R(i,j) > cumprob2)+1;
+ end
+ end
+end
+
+
+% Slower, even though vectorized
+%cumprob = reshape(cumsum([0 prob(1:end-1)]), [1 1 n]);
+%M = sum(R(:,:,ones(n,1)) > cumprob(ones(r,1),ones(c,1),:), 3);
+
+% convert using a binning algorithm
+%M=bindex(R,cumprob);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/sample_gaussian.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/sample_gaussian.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function M = sample_gaussian(mu, Sigma, N)
+% SAMPLE_GAUSSIAN Draw N random row vectors from a Gaussian distribution
+% samples = sample_gaussian(mean, cov, N)
+
+if nargin==2
+ N = 1;
+end
+
+% If Y = CX, Var(Y) = C Var(X) C'.
+% So if Var(X)=I, and we want Var(Y)=Sigma, we need to find C. s.t. Sigma = C C'.
+% Since Sigma is psd, we have Sigma = U D U' = (U D^0.5) (D'^0.5 U').
+
+mu = mu(:);
+n=length(mu);
+[U,D,V] = svd(Sigma);
+M = randn(n,N);
+M = (U*sqrt(D))*M + mu*ones(1,N); % transform each column
+M = M';
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/standardize.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/standardize.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function [S, mu, sigma2] = standardize(M, mu, sigma2)
+% function S = standardize(M, mu, sigma2)
+% Make each row of M be zero mean, std 1 (mean/std taken along dim 2).
+% Thus each row is scaled separately.
+%
+% If mu, sigma2 are omitted, they are computed from M
+
+M = double(M);
+if nargin < 2
+ mu = mean(M,2);
+ sigma2 = std(M,0,2);
+ sigma2 = sigma2 + eps*(sigma2==0);
+end
+
+[nrows ncols] = size(M);
+S = M - repmat(mu(:), [1 ncols]);
+S = S ./ repmat(sigma2, [1 ncols]);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/student_t_logprob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/student_t_logprob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function L = log_student_pdf(X, mu, lambda, alpha)
+% LOG_STUDENT_PDF Evaluate the log of the multivariate student-t distribution at a point
+% L = log_student_pdf(X, mu, lambda, alpha)
+%
+% Each column of X is evaluated.
+% See Bernardo and Smith p435.
+
+k = length(mu);
+assert(size(X,1) == k);
+[k N] = size(X);
+logc = gammaln(0.5*(alpha+k)) - gammaln(0.5*alpha) - (k/2)*log(alpha*pi) + 0.5*log(det(lambda));
+middle = (1 + (1/alpha)*(X-mu)'*lambda*(X-mu)); % scalar version
+L = logc - ((alpha+k)/2)*log(middle);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/student_t_prob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/student_t_prob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function p = student_t_pdf(X, mu, lambda, alpha)
+% STUDENT_T_PDF Evaluate the multivariate student-t distribution at a point
+% p = student_t_pdf(X, mu, lambda, alpha)
+%
+% Each column of X is evaluated.
+% See Bernardo and Smith p435.
+
+k = length(mu);
+assert(size(X,1) == k);
+[k N] = size(X);
+numer = gamma(0.5*(alpha+k));
+denom = gamma(0.5*alpha) * (alpha*pi)^(k/2);
+c = (numer/denom) * det(lambda)^(0.5);
+p = c*(1 + (1/alpha)*(X-mu)'*lambda*(X-mu))^(-(alpha+k)/2); % scalar version
+%m = repmat(mu(:), 1, N);
+%exponent = sum((X-m)'*lambda*(X-m), 2); % column vector
+%p = c*(1 + (1/alpha)*exponent).^(-(alpha+k)/2);
+
+%keyboard % leftover debugging breakpoint disabled: would drop every caller into the debugger
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/test_dir.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/test_dir.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+% # of sample points
+n_samples = 1000;
+
+p = ones(3,1)/3;
+
+% Low Entropy
+alpha = 0.5*p;
+
+% High Entropy
+%alpha = 10*p;
+
+% draw n_samples random points from the 3-d dirichlet(alpha),
+% and plot the results
+points = zeros(3,n_samples);
+for i = 1:n_samples
+ points(:,i) = dirichletrnd(alpha);
+end
+
+scatter3(points(1,:)', points(2,:)', points(3,:)', 'r', '.', 'filled');
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/unidrndKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/unidrndKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function R = unidrndKPM(min, max, nr, nc)
+
+if nargin < 3
+ nr = 1; nc = 1;
+end
+
+R = unidrnd(max-min+1, nr, nc) + (min-1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/unif_discrete_sample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/unif_discrete_sample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function r = unif_discrete_sample(n, nrows, ncols)
+% UNIF_DISCRETE_SAMPLE Generate random numbers uniformly from {1,2,..,n}
+% function r = unif_discrete_sample(n, nrows, ncols)
+% Same as unidrnd in the stats toolbox.
+
+r = ceil(n .* rand(nrows,ncols));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMstats/weightedRegression.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMstats/weightedRegression.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function [a, b, error] = weightedRegression(x, z, w)
+% [a , b, error] = weightedRegression(x, z, w);
+% % Weighted scalar linear regression
+%
+% Find a,b to minimize
+% error = sum(w * |z - (a*x + b)|^2)
+% and x(i) is a scalar
+
+if nargin < 3, w = ones(1,length(x)); end
+
+w = w(:)';
+x = x(:)';
+z = z(:)';
+
+W = sum(w);
+Y = sum(w .* z);
+YY = sum(w .* z .* z);
+YTY = sum(w .* z .* z);
+X = sum(w .* x);
+XX = sum(w .* x .* x);
+XY = sum(w .* x .* z);
+
+[b, a] = clg_Mstep_simple(W, Y, YY, YTY, X, XX, XY);
+error = sum(w .* (z - (a*x + b)).^2 );
+
+if 0
+ % demo
+ seed = 1;
+ rand('state', seed); randn('state', seed);
+ x = -10:10;
+ N = length(x);
+ noise = randn(1,N);
+ aTrue = rand(1,1);
+ bTrue = rand(1,1);
+ z = aTrue*x + bTrue + noise;
+
+ w = ones(1,N);
+ [a, b, err] = weightedRegression(x, z, w);
+
+ b2=regress(z(:), [x(:) ones(N,1)]);
+ assert(approxeq(b,b2(2)))
+ assert(approxeq(a,b2(1)))
+
+ % Make sure we go through x(15) perfectly
+ w(15) = 1000;
+ [aW, bW, errW] = weightedRegression(x, z, w);
+
+ figure;
+ plot(x, z, 'ro')
+ hold on
+ plot(x, a*x+b, 'bx-')
+ plot(x, aW*x+bW, 'gs-')
+ title(sprintf('a=%5.2f, aHat=%5.2f, aWHat=%5.3f, b=%5.2f, bHat=%5.2f, bWHat=%5.3f, err=%5.3f, errW=%5.3f', ...
+ aTrue, a, aW, bTrue, b, bW, err, errW))
+ legend('truth', 'ls', 'wls')
+
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,176 @@
+/README.txt/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/approx_unique.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/approxeq.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/argmax.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/argmin.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/asdemo.html/1.1.1.1/Wed Mar 30 19:59:00 2005//
+/asdemo.m/1.1.1.1/Sun Mar 27 02:51:24 2005//
+/asort.m/1.1.1.1/Fri May 13 20:52:22 2005//
+/assert.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/assignEdgeNums.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/assign_cols.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/axis_pct.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/bipartiteMatchingDemo.m/1.1.1.1/Mon May 9 16:47:32 2005//
+/bipartiteMatchingDemoPlot.m/1.1.1.1/Mon May 9 04:45:10 2005//
+/bipartiteMatchingHungarian.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/bipartiteMatchingIntProg.m/1.1.1.1/Mon May 9 05:25:10 2005//
+/block.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/cell2matPad.m/1.1.1.1/Tue Jun 28 01:30:44 2005//
+/cell2num.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/centeringMatrix.m/1.1.1.1/Sun May 8 22:48:48 2005//
+/checkpsd.m/1.1.1.1/Fri Feb 7 02:25:30 2003//
+/chi2inv.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/choose.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/collapse_mog.m/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/colmult.c/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/colmult.mexglx/1.1.1.1/Tue Apr 26 02:30:30 2005//
+/computeROC.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/compute_counts.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/conf2mahal.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/cross_entropy.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/dirKPM.m/1.1.1.1/Fri May 13 20:52:22 2005//
+/div.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/draw_circle.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/draw_ellipse.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/draw_ellipse_axes.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/em_converged.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/entropy.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/exportfig.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/extend_domain_table.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/factorial.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/filepartsLast.m/1.1.1.1/Mon May 30 22:08:06 2005//
+/find_equiv_posns.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/fullfileKPM.m/1.1.1.1/Sat Aug 27 01:08:50 2005//
+/genpathKPM.m/1.1.1.1/Wed May 25 19:11:42 2005//
+/hash_add.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/hash_del.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/hash_lookup.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/hsvKPM.m/1.1.1.1/Mon May 2 20:19:00 2005//
+/hungarian.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/image_rgb.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/imresizeAspect.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/ind2subv.c/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/ind2subv.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/initFigures.m/1.1.1.1/Wed Jun 1 04:49:22 2005//
+/installC_KPMtools.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/is_psd.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/is_stochastic.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/isemptycell.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/isposdef.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/isscalar.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/isvector.m/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/junk.c/1.1.1.1/Tue Apr 26 02:30:32 2005//
+/loadcell.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logb.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logdet.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logsum.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logsum_simple.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logsum_test.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logsumexp.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/logsumexpv.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mahal2conf.m/1.1.1.1/Wed Apr 27 17:58:32 2005//
+/marg_table.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/marginalize_table.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/matprint.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/max_mult.c/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/max_mult.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mexutil.c/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mexutil.h/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mk_multi_index.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mk_stochastic.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mkdirKPM.m/1.1.1.1/Mon May 9 22:20:22 2005//
+/montageKPM.m/1.1.1.1/Wed Jun 1 19:39:54 2005//
+/montageKPM2.m/1.1.1.1/Wed Jul 6 19:32:54 2005//
+/montageKPM3.m/1.1.1.1/Tue Jun 28 01:35:44 2005//
+/mult_by_table.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myintersect.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myismember.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myones.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myplot.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myrand.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myrepmat.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myreshape.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mysetdiff.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mysize.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mysubset.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/mysymsetdiff.m/1.1.1.1/Tue Apr 26 02:30:34 2005//
+/myunion.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/nchoose2.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/ncols.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/nonmaxsup.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/normalise.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/normaliseC.c/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/normaliseC.dll/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/normalize.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/nrows.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/num2strcell.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/optimalMatching.m/1.1.1.1/Mon May 9 22:20:22 2005//
+/optimalMatchingTest.m/1.1.1.1/Mon May 9 22:20:22 2005//
+/partitionData.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/partition_matrix_vec.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/pca_kpm.m/1.1.1.1/Tue Sep 13 05:18:28 2005//
+/pca_netlab.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/pick.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotBox.m/1.1.1.1/Mon May 30 06:01:56 2005//
+/plotColors.m/1.1.1.1/Thu May 26 01:31:22 2005//
+/plotROC.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotROCkpm.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plot_axis_thru_origin.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plot_ellipse.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plot_matrix.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plot_polygon.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotcov2.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotcov2New.m/1.1.1.1/Mon Jul 11 19:07:28 2005//
+/plotcov3.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotgauss1d.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotgauss2d.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/plotgauss2d_old.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/polygon_area.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/polygon_centroid.m/1.1.1.1/Tue Apr 26 02:30:36 2005//
+/polygon_intersect.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/previewfig.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/process_options.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rand_psd.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rectintC.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rectintLoopC.c/1.1.1.1/Sun Jun 5 18:46:40 2005//
+/rectintLoopC.dll/1.1.1.1/Sun Jun 5 18:46:40 2005//
+/rectintLoopC.mexglx/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rectintSparse.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rectintSparseC.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rectintSparseLoopC.c/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rectintSparseLoopC.dll/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/repmatC.c/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/repmatC.dll/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/repmatC.mexglx/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rgb2grayKPM.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rnd_partition.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/rotate_xlabel.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/safeStr.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/sampleUniformInts.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/sample_discrete.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/set_xtick_label.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/set_xtick_label_demo.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/setdiag.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/softeye.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/sort_evec.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/splitLongSeqIntoManyShort.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/sprintf_intvec.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/sqdist.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/strmatch_multi.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/strmatch_substr.m/1.1.1.1/Tue Apr 26 02:30:38 2005//
+/strsplit.m/1.1.1.1/Tue May 3 19:01:46 2005//
+/subplot2.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/subplot3.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/subsets.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/subsets1.m/1.1.1.1/Mon May 9 22:20:22 2005//
+/subsetsFixedSize.m/1.1.1.1/Mon May 9 02:55:36 2005//
+/subv2ind.c/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/subv2ind.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/sumv.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/suptitle.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/unaryEncoding.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/wrap.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/xticklabel_rotate90.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/zipload.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+/zipsave.m/1.1.1.1/Tue Apr 26 02:30:40 2005//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/KPMtools
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+KPMtools is a directory of miscellaneous matlab functions written by
+Kevin Patrick Murphy and various other people (see individual file headers).
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/approx_unique.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/approx_unique.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function [B, keep] = approx_unique(A, thresh, flag)
+% APPROX_UNIQUE Return elements of A that differ from the rest by less than thresh
+% B = approx_unique(A, thresh)
+% B = approx_unique(A, thresh, 'rows')
+
+keep = [];
+
+if nargin < 3 | isempty(flag)
+ A = sort(A);
+ B = A(1);
+ for i=2:length(A)
+ if ~approxeq(A(i), A(i-1), thresh)
+ B = [B A(i)];
+ keep = [keep i];
+ end
+ end
+else
+% A = sortrows(A);
+% B = A(1,:);
+% for i=2:size(A,1)
+% if ~approxeq(A(i,:), A(i-1,:), thresh)
+% B = [B; A(i,:)];
+% keep = [keep i];
+% end
+% end
+ B = [];
+ for i=1:size(A,1)
+ duplicate = 0;
+ for j=i+1:size(A,1)
+ if approxeq(A(i,:), A(j,:), thresh)
+ duplicate = 1;
+ break;
+ end
+ end
+ if ~duplicate
+ B = [B; A(i,:)];
+ keep = [keep i];
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/approxeq.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/approxeq.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function p = approxeq(a, b, tol, rel)
+% APPROXEQ Are a and b approximately equal (to within a specified tolerance)?
+% p = approxeq(a, b, thresh)
+% 'tol' defaults to 1e-2.
+% p(i) = 1 iff abs(a(i) - b(i)) < thresh
+%
+% p = approxeq(a, b, thresh, 1)
+% p(i) = 1 iff abs(a(i)-b(i))/abs(a(i)) < thresh
+
+if nargin < 3, tol = 1e-2; end
+if nargin < 4, rel = 0; end
+
+a = a(:);
+b = b(:);
+d = abs(a-b);
+if rel
+ p = ~any( (d ./ (abs(a)+eps)) > tol);
+else
+ p = ~any(d > tol);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/argmax.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/argmax.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function indices = argmax(v)
+% ARGMAX Return as a subscript vector the location of the largest element of a multidimensional array v.
+% indices = argmax(v)
+%
+% Returns the first maximum in the case of ties.
+% Example:
+% X = [2 8 4; 7 3 9];
+% argmax(X) = [2 3], i.e., row 2 column 3
+
+[m i] = max(v(:));
+indices = ind2subv(mysize(v), i);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/argmin.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/argmin.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function indices = argmin(v)
+% ARGMIN Return as a subscript vector the location of the smallest element of a multidimensional array v.
+% indices = argmin(v)
+%
+% Returns the first minimum in the case of ties.
+% Example:
+% X = [2 8 4; 7 3 9];
+% argmin(X) = [1 1], i.e., row 1 column 1
+
+[m i] = min(v(:));
+indices = ind2subv(mysize(v), i);
+%indices = ind2subv(size(v), i);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/asdemo.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/asdemo.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,690 @@
+
+
+
+
+
+
+
+
+asdemo
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
% ASORT
% a pedestrian NUMERICAL SORTER of ALPHANUMERIC data
% - create some data
         d = {
%Â Â Â Â Â Â Â Â strings with one valid alphanumeric number
%Â Â Â Â Â Â Â Â sorted numerically
                 '-inf'
                 'x-3.2e4y'
                 'f-1.4'
                 '-.1'
                 '+ .1d-2'
                 '.1'
                 'f.1'
                 'f -+1.4'
                 'f.2'
                 'f.3'
                 'f.10'
                 'f.11'
                 '+inf'
                 ' -nan'
                 '+ nan'
                 'nan'
%Â Â Â Â Â Â Â Â strings with many numbers or invalid/ambiguous numbers
%Â Â Â Â Â Â Â Â sorted in ascii dictionary order
                 ' nannan'
                 '+ .1e-.2'
                 '-1 2'
                 'Z12e12ez'
                 'inf -inf'
                 's.3TT.4'
                 'z12e12ez'
%Â Â Â Â Â Â Â Â strings without numbers
%Â Â Â Â Â Â Â Â sorted in ascii dictionary order
                 ' . .. '
                 '.'
                 '...'
                 '.b a.'
                 'a string'
                 'a. .b'
         };
%Â Â ... and scramble it...
         rand('seed',10);
         d=d(randperm(numel(d)));
% - run ASORT with
%Â Â verbose output:Â Â Â Â Â Â Â Â Â Â Â Â Â <-v>
%Â Â keep additional results:Â Â Â Â <-d>
         o=asort(d,'-v','-d');
% - or
%Â Â Â Â Â Â Â Â p=asort(char(d),'-v','-d');
   'INPUT'      'ASCII SORT'   'NUM SORT'            'NUM READ'       Â
    '...'        ' -nan'        '--- NUMERICAL'       '--- NUMBERS'    Â
    '+ .1e-.2'   ' . .. '       '-inf'                [            -Inf]
   '.1'         ' nannan'     'x-3.2e4y'            [          -32000]
   '.b a.'      '+ .1d-2'      'f-1.4'               [            -1.4]
   '-inf'       '+ .1e-.2'     '-.1'                 [            -0.1]
   'f.1'        '+ nan'        '+ .1d-2'             [           0.001]
   ' -nan'      '+inf'         '.1'                  [             0.1]
   '-1 2'       '-.1'          'f.1'                 [               1]
   'nan'        '-1 2'         'f -+1.4'             [             1.4]
   'a string'   '-inf'         'f.2'                 [               2]
   'f.3'        '.'            'f.3'                 [               3]
   '+ .1d-2'    '...'          'f.10'                [              10]
   'a. .b'      '.1'           'f.11'                [              11]
   's.3TT.4'    '.b a.'        '+inf'                [             Inf]
   '+inf'       'Z12e12ez'     ' -nan'               [             NaN]
   ' nannan'   'a string'     '+ nan'               [             NaN]
   'f-1.4'      'a. .b'        'nan'                 [             NaN]
   'x-3.2e4y'   'f -+1.4'      '--- ASCII NUMBERS'   '--- ASCII NUMBERS'
   'inf -inf'   'f-1.4'        ' nannan'            ' nannan'       Â
    '+ nan'      'f.1'          '+ .1e-.2'            '+ .1e-.2'       Â
    'f.2'        'f.10'         '-1 2'                '-1 2'           Â
    'f.11'       'f.11'         'Z12e12ez'            'Z12e12ez'       Â
    'Z12e12ez'   'f.2'          'inf -inf'            'inf -inf'       Â
    'z12e12ez'   'f.3'          's.3TT.4'             's.3TT.4'        Â
    'f -+1.4'    'inf -inf'     'z12e12ez'            'z12e12ez'       Â
    ' . .. '     'nan'          '--- ASCII STRINGS'   '--- ASCII STRINGS'
   'f.10'       's.3TT.4'      ' . .. '              ' . .. '         Â
    '.'          'x-3.2e4y'     '.'                   '.'              Â
    '-.1'        'z12e12ez'     '...'                 '...'            Â
    ' '          ''            '.b a.'               '.b a.'          Â
    ' '          ''            'a string'            'a string'       Â
    ' '          ''            'a. .b'               'a. .b'          Â
% - show results
         o
o =
          magic: 'ASORT'
           ver: '30-Mar-200511:57:07'
          time: '30-Mar-2005 11:57:17'
       runtime: 0.047
   input_class: 'cell'
   input_msize: [29 1]
   input_bytes: 2038
   strng_class: 'char'
   strng_msize: [29 8]
   strng_bytes: 464
           anr: {16x1 cell}
           snr: {7x1 cell}
           str: {6x1 cell}
             c: [29x12 char]
             t: [29x12 logical]
             n: [16x12 char]
             d: [16x1 double]
         o.anr
ans =
    '-inf'
   'x-3.2e4y'
   'f-1.4'
   '-.1'
   '+ .1d-2'
   '.1'
   'f.1'
   'f -+1.4'
   'f.2'
   'f.3'
   'f.10'
   'f.11'
   '+inf'
   ' -nan'
   '+ nan'
   'nan'
% - run ASORT with no-space/template options
%Â Â NOTE the impact of -w/-t order!
         s={'ff - 1','ff + 1','- 12'};
%Â Â RAW
         o=asort(s,'-v');
   'INPUT'    'ASCII SORT'   'NUM SORT'            'NUM READ'       Â
    'ff - 1'   '- 12'         '--- NUMERICAL'       '--- NUMBERS'    Â
    'ff + 1'   'ff + 1'       'ff + 1'              [               1]
   '- 12'     'ff - 1'       'ff - 1'              [               1]
   ' '        ''            '- 12'                [              12]
   ' '        ''            '--- ASCII NUMBERS'   '--- ASCII NUMBERS'
   ' '        ''            '--- ASCII STRINGS'   '--- ASCII STRINGS'
%Â Â removeSPACEs
         o=asort(s,'-v','-w');
   'INPUT'   'ASCII SORT'   'NUM SORT'            'NUM READ'       Â
    'ff-1'     '-12'          '--- NUMERICAL'       '--- NUMBERS'    Â
    'ff+1'    'ff+1'         '-12'                 [             -12]
   '-12'     'ff-1'         'ff-1'                [              -1]
   ' '       ''            'ff+1'                [               1]
   ' '       ''            '--- ASCII NUMBERS'   '--- ASCII NUMBERS'
   ' '       ''            '--- ASCII STRINGS'   '--- ASCII STRINGS'
%Â Â removeTEMPLATE(s)
         o=asort(s,'-v','-t',{'ff','1'});
   'INPUT'   'ASCII SORT'   'NUM SORT'            'NUM READ'       Â
    ' - '      ' + '          '--- NUMERICAL'       '--- NUMBERS'    Â
    ' + '      ' - '          '- 2'                 [               2]
   '- 2'     '- 2'          '--- ASCII NUMBERS'   '--- ASCII NUMBERS'
   ' '       ''            '--- ASCII STRINGS'   '--- ASCII STRINGS'
   ' '       ''            ' + '                 ' + '            Â
    ' '       ''            ' - '                 ' - '            Â
%Â Â removeTEMPLATE(s) than SPACEs
         o=asort(s,'-v','-t','1','-w');
   'INPUT'   'ASCII SORT'   'NUM SORT'            'NUM READ'       Â
    'ff-'     '-2'           '--- NUMERICAL'       '--- NUMBERS'    Â
    'ff+'      'ff+'          '-2'                  [              -2]
   '-2'      'ff-'          '--- ASCII NUMBERS'   '--- ASCII NUMBERS'
   ' '       ''            '--- ASCII STRINGS'   '--- ASCII STRINGS'
   ' '       ''            'ff+'                 'ff+'            Â
    ' '       ''            'ff-'                 'ff-'            Â
%Â Â removeSPACEs than TEMPLATE(s)
         o=asort(s,'-v','-w','-t','1');
   'INPUT'   'ASCII SORT'   'NUM SORT'            'NUM READ'       Â
    'ff- '    '- 2'          '--- NUMERICAL'       '--- NUMBERS'    Â
    'ff+ '    'ff+ '         '- 2'                 [               2]
   '- 2'     'ff- '         '--- ASCII NUMBERS'   '--- ASCII NUMBERS'
   ' '       ''            '--- ASCII STRINGS'   '--- ASCII STRINGS'
   ' '       ''            'ff+ '                'ff+ '           Â
    ' '       ''            'ff- '                'ff- '           Â
+
+
+Published with MATLAB® 7.0.4
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/asdemo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/asdemo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+% ASORT
+% a pedestrian NUMERICAL SORTER of ALPHANUMERIC data
+
+% - create some data
+ d = {
+% strings with one valid alphanumeric number
+% sorted numerically
+ '-inf'
+ 'x-3.2e4y'
+ 'f-1.4'
+ '-.1'
+ '+ .1d-2'
+ '.1'
+ 'f.1'
+ 'f -+1.4'
+ 'f.2'
+ 'f.3'
+ 'f.10'
+ 'f.11'
+ '+inf'
+ ' -nan'
+ '+ nan'
+ 'nan'
+% strings with many numbers or invalid/ambiguous numbers
+% sorted in ascii dictionary order
+ ' nan nan'
+ '+ .1e-.2'
+ '-1 2'
+ 'Z12e12ez'
+ 'inf -inf'
+ 's.3TT.4'
+ 'z12e12ez'
+% strings without numbers
+% sorted in ascii dictionary order
+ ' . .. '
+ '.'
+ '...'
+ '.b a.'
+ 'a string'
+ 'a. .b'
+ };
+% ... and scramble it...
+ rand('seed',10);
+ d=d(randperm(numel(d)));
+
+% - run ASORT with
+% verbose output: <-v>
+% keep additional results: <-d>
+ o=asort(d,'-v','-d');
+% - or
+% p=asort(char(d),'-v','-d');
+
+% - show results
+ o
+ o.anr
+
+% - run ASORT with no-space/template options
+% NOTE the impact of -w/-t order!
+ s={'ff - 1','ff + 1','- 12'};
+% RAW
+ o=asort(s,'-v');
+% remove SPACEs
+ o=asort(s,'-v','-w');
+% remove TEMPLATE(s)
+ o=asort(s,'-v','-t',{'ff','1'});
+% remove TEMPLATE(s) than SPACEs
+ o=asort(s,'-v','-t','1','-w');
+% remove SPACEs than TEMPLATE(s)
+ o=asort(s,'-v','-w','-t','1');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/asort.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/asort.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,376 @@
+%[ANR,SNR,STR] = ASORT(INP,'OPT',...);
+% S = ASORT(INP,'OPT',...);
+% to sort alphanumeric strings numerically if
+% they contain one properly formatted number
+% otherwise, ascii dictionary sorting is applied
+%
+% INP unsorted input:
+% - a char array
+% - a cell array of strings
+% OPT options
+% -s - sorting option
+% '-s','ascend' [def]
+% '-s','descend'
+% -st - force output form S [def: nargout dependent]
+% -t - replace matching template(s) with one space
+% prior to sorting
+% '-t','template'
+% '-t',{'template1','template2',...}
+% -w - remove space(s) prior to sorting
+%
+% NOTE -t/-w options are processed in the
+% order that they appear in
+% the command line
+%
+% -v - verbose output [def: quiet]
+% -d - debug mode
+% save additional output in S
+% .c: lex parser input
+% .t: lex parser table
+% .n: lex parser output
+% .d: numbers read from .n
+%
+% ANR numerically sorted alphanumeric strings [eg, 'f.-1.5e+2x.x']
+% - contain one number that can be read by
+% |
+% SNR ascii dict sorted alphanumeric strings
+% http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=7212#
+%
+% - contain more than one number [eg, 'f.-1.5e +2.x']
+% - contain incomplete|ambiguous numbers [eg, 'f.-1.5e+2.x']
+% STR ascii dict sorted strings
+% - contain no numbers [eg, 'a test']
+%
+% S structure with fields
+% .anr
+% .srn
+% .str
+
+% created:
+% us 03-Mar-2002
+% modified:
+% us 30-Mar-2005 11:57:07 / TMW R14.sp2
+
+%--------------------------------------------------------------------------------
+function varargout=asort(inp,varargin)
+
+varargout(1:nargout)={[]};
+if ~nargin
+ help(mfilename);
+ return;
+end
+
+% - common parameters/options
+n=[];
+ds=[];
+anr={};
+snr={};
+str={};
+smod='ascend'; % sorting option
+tmpl={}; % template(s)
+sflg=false; % output mode: structure
+tflg=false; % remove template(s)
+dflg=false; % debug mode
+vflg=false; % verbose output
+wflg=false; % remove spaces
+
+if nargin > 1
+ ix=find(strcmp('-s',varargin));
+ if ~isempty(ix) && nargin > ix(end)+1
+ smod=varargin{ix(end)+1};
+ end
+ ix=find(strcmp('-t',varargin));
+ if ~isempty(ix) && nargin > ix(end)+1
+ tflg=ix(end);
+ tmpl=varargin{ix(end)+1};
+ end
+ if find(strcmp('-d',varargin));
+ dflg=true;
+ end
+ if find(strcmp('-st',varargin));
+ sflg=true;
+ end
+ if find(strcmp('-v',varargin));
+ vflg=true;
+ end
+ ix=find(strcmp('-w',varargin));
+ if ~isempty(ix)
+ wflg=ix(end);
+ end
+end
+% spec numbers
+ntmpl={
+ ' inf '
+ '+inf '
+ '-inf '
+ ' nan '
+ '+nan '
+ '-nan '
+ };
+% spec chars
+ctmpl={
+ '.' % decimal point
+ 'd' % exponent
+ 'e' % exponent
+ };
+
+if nargout <= 3
+ varargout{1}=inp;
+else
+ disp(sprintf('ASORT> too many output args [%-1d/%-1d]\n',nargout,3));
+ help(mfilename);
+ return;
+end
+if isempty(inp)
+ disp(sprintf('ASORT> input is empty'));
+ return;
+end
+
+ti=clock;
+winp=whos('inp');
+switch winp.class
+ case 'cell'
+ if ~iscellstr(inp)
+ disp(sprintf('ASORT> cell is not an array of strings'));
+ return;
+ end
+ inp=inp(:);
+ [ins,inx]=sort(inp);
+ case 'char'
+ % [ins,inx]=sortrows(inp);
+ inp=cstr(inp);
+ otherwise
+ disp(sprintf('ASORT> does not sort input of class <%s>',winp.class));
+ return;
+end
+
+inp=inp(:);
+inp=setinp(inp,tmpl,[tflg wflg]);
+[ins,inx]=sort(inp);
+if strcmp(smod,'descend')
+ ins=ins(end:-1:1,:);
+ inx=inx(end:-1:1);
+end
+ins=inp(inx);
+c=lower(char(ins));
+wins=whos('c');
+[cr,cc]=size(c);
+
+% - LEXICAL PARSER
+%--------------------------------------------------------------------------------
+% - extend input on either side for search
+c=[' '*ones(cr,2) c ' '*ones(cr,2)];
+
+% - search for valid alphanumeric items in strings
+% numbers/signs
+t=(c>='0'&c<='9');
+t=t|c=='-';
+t=t|c=='+';
+[tr,tc]=size(t);
+% decimal points
+% note: valid numbers with dec points must follow these templates
+% nr.nr
+% sign.nr
+% nr.
+% .nr
+ix1= t(:,1:end-2) & ...
+ ~isletter(c(:,1:end-2)) & ...
+ c(:,2:end-1)=='.';
+t(:,2:end-1)=t(:,2:end-1)|ix1;
+ix1= (t(:,3:end) & ...
+ (~isletter(c(:,3:end)) & ...
+ ~isletter(c(:,1:end-2))) | ...
+ (c(:,3:end)=='e' | ...
+ c(:,3:end)=='d')) & ...
+ c(:,2:end-1)=='.';
+t(:,2:end-1)=t(:,2:end-1)|ix1;
+% t(:,3:end)=t(:,3:end)|ix1;
+% signs
+t(c=='-')=false;
+t(c=='+')=false;
+ix1= t(:,3:end) & ...
+ (c(:,2:end-1)=='-' | ...
+ c(:,2:end-1)=='+');
+t(:,2:end-1)=t(:,2:end-1)|ix1;
+% exponents
+ix1= t(:,1:end-2) & ...
+ (c(:,2:end-1)=='e' | ...
+ c(:,2:end-1)=='d');
+t(:,2:end-1)=t(:,2:end-1)|ix1;
+% spec numbers
+c=reshape(c.',1,[]);
+t=t';
+ic=[];
+for j=1:numel(ntmpl)
+ ic=[ic,strfind(c,ntmpl{j})];
+end
+ic=sort(ic);
+for i=1:numel(ic)
+ ix=ic(i)+0:ic(i)+4;
+ t(ix)=true;
+end
+t=t';
+c=reshape(c.',[tc,tr]).';
+t(c==' ')=false;
+%--------------------------------------------------------------------------------
+
+% - only allow one number per string
+il=~any(t,2);
+ib=strfind(reshape(t.',1,[]),[0 1]);
+if ~isempty(ib)
+ ixe=cell(3,1);
+ n=reshape(char(t.*c).',1,[]);
+ for i=1:numel(ctmpl)
+ id=strfind(n,ctmpl{i});
+ if ~isempty(id)
+ [dum,dum,ixu{i},ixe{i}]=dupinx(id,tc);
+ end
+ end
+ in=false(tr,1);
+ im=in;
+ % must check for anomalous cases like <'.d'>
+ id=sort(...
+ [find(n>='0' & n<='9'),...
+ strfind(n,'inf'),...
+ strfind(n,'nan')]);
+ % [ibu,ibd,ixbu,ixe{i+1}]=dupinx(id,tc);
+ [ibu,ibd,ixbu,ixbd]=dupinx(id,tc);
+ in(ixbu)=true;
+ in(ixbd)=true;
+ [ibu,ibd,ixbu,ixbd]=dupinx(ib,tc);
+ im(ixbu)=true;
+ in=in&im;
+ in([ixe{:}])=false;
+ il=~any(t,2);
+ ia=~(in|il);
+
+ % - read valid strings
+ n=t(in,:).*c(in,:);
+ n(n==0)=' ';
+ n=char(n);
+ dn=strread(n.','%n');
+ if numel(dn) ~= numel(find(in))
+ %disp(sprintf('ASORT> unexpected fatal error reading input!'));
+ if nargout
+ s.c=c;
+ s.t=t;
+ s.n=n;
+ s.d=dn;
+ varargout{1}=s;
+ end
+ return;
+ end
+
+ % - sort numbers
+ [ds,dx]=sort(dn,1,smod);
+ in=find(in);
+ anr=ins(in(dx));
+ snr=ins(ia);
+end
+str=ins(il);
+to=clock;
+
+% - prepare output
+if nargout < 3 || sflg
+ s.magic='ASORT';
+ s.ver='30-Mar-2005 11:57:07';
+ s.time=datestr(clock);
+ s.runtime=etime(to,ti);
+ s.input_class=winp.class;
+ s.input_msize=winp.size;
+ s.input_bytes=winp.bytes;
+ s.strng_class=wins.class;
+ s.strng_msize=wins.size;
+ s.strng_bytes=wins.bytes;
+ s.anr=anr;
+ s.snr=snr;
+ s.str=str;
+ if dflg
+ s.c=c;
+ s.t=t;
+ s.n=n;
+ s.d=ds;
+ end
+ varargout{1}=s;
+else
+ s={anr,snr,str};
+ for i=1:nargout
+ varargout{i}=s{i};
+ end
+end
+
+if vflg
+ inp=cstr(inp);
+ an=[{'--- NUMERICAL'}; anr];
+ as=[{'--- ASCII NUMBERS'}; snr];
+ at=[{'--- ASCII STRINGS'}; str];
+ nn=[{'--- NUMBERS'}; num2cell(ds)];
+ ag={' ';' ';' '};
+ u=[{'INPUT'}; inp;ag];
+ v=[{'ASCII SORT'}; ins;ag];
+ w=[{'NUM SORT'}; an;as;at];
+ x=[{'NUM READ'}; nn;as;at];
+ w=[u,v,w,x];
+ disp(w);
+end
+
+return;
+%--------------------------------------------------------------------------------
+function c=cstr(s)
+% - bottleneck waiting for a good replacement
+% it consumes ~75% of 's processing time!
+
+c=s;
+if ischar(s)
+ sr=size(s,1);
+ c=cell(sr,1);
+ for i=1:sr
+ c{i}=s(i,:); % no deblanking!
+ end
+end
+return;
+%--------------------------------------------------------------------------------
+function [idu,idd,ixu,ixd]=dupinx(ix,nc)
+% - check for more than one entry/row in a matrix of column size
+% unique indices: idu / ixu
+% duplicate indices: idd / ixd
+
+if isempty(ix)
+ idu=[];
+ idd=[];
+ ixu=[];
+ ixd=[];
+ return;
+end
+id=fix(ix/nc)+1;
+idi=diff(id)~=0;
+ide=[true idi];
+idb=[idi true];
+idu=idb & ide;
+idd=idb==1 & ide==0;
+ixu=id(idu);
+ixd=id(idd);
+return;
+%--------------------------------------------------------------------------------
+function inp=setinp(inp,tmpl,flg)
+% - remove space(s) and/or templates
+
+if isempty(inp) || ~any(flg)
+ return;
+end
+
+for i=sort(flg)
+ switch i
+ case flg(1)
+ if ischar(tmpl)
+ tmpl={tmpl};
+ end
+ for i=1:numel(tmpl)
+ inp=strrep(inp,tmpl{i},' ');
+ end
+ case flg(2)
+ inp=strrep(inp,' ','');
+ end
+end
+return;
+%--------------------------------------------------------------------------------
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/assert.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/assert.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function assert(pred, str)
+% ASSERT Raise an error if the predicate is not true.
+% assert(pred, string)
+
+if nargin<2, str = ''; end
+
+if ~pred
+ s = sprintf('assertion violated: %s', str);
+ error(s);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/assertBNT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/assertBNT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function assertBNT(pred, str)
+% ASSERT Raise an error if the predicate is not true.
+% assert(pred, string)
+
+if nargin<2, str = ''; end
+
+if ~pred
+ s = sprintf('assertion violated: %s', str);
+ error(s);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/assignEdgeNums.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/assignEdgeNums.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function [edge_id, nedges] = assignEdgeNums(adj_mat)
+% give each edge a unique number
+% we number (i,j) for j>i first, in row, column order.
+% Then we number the reverse links
+
+nnodes = length(adj_mat);
+edge_id = zeros(nnodes);
+e = 1;
+for i=1:nnodes
+ for j=i+1:nnodes
+ if adj_mat(i,j)
+ edge_id(i,j) = e;
+ e = e+1;
+ end
+ end
+end
+
+nedges = e-1;
+tmp = edge_id;
+ndx = find(tmp);
+tmp(ndx) = tmp(ndx)+nedges;
+edge_id = edge_id + triu(tmp)';
+
+
+if 0
+ndx = find(adj_mat);
+nedges = length(ndx);
+nnodes = length(adj_mat);
+edge_id = zeros(1, nnodes*nnodes);
+edge_id(ndx) = 1:nedges;
+edge_id = reshape(edge_id, nnodes, nnodes);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/assign_cols.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/assign_cols.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function M = assign_cols(cols, vals, M)
+% ASSIGN_COLS Assign values to columns of a matrix
+% function M = assign_cols(M, cols, vals, M)
+%
+% Example:
+% M = assign_cols(data, ones(1,N))
+% will construct a 1-of-K encoding of the data, where K=ncols=max(data) and N=nrows=length(data)
+%
+% Example:
+% M = zeros(3,2);
+% M = assign_cols([1 2 1], [10 20 30], M)
+% is equivalent to
+% M(1, 1) = 10
+% M(2, 2) = 20
+% M(3, 1) = 30
+%
+
+if nargin < 3
+ nr = length(cols);
+ nc = max(cols);
+ M = zeros(nr, nc);
+else
+ [nr nc] = size(M);
+end
+
+if 0
+for r=1:nr
+ M(r, cols(r)) = vals(r);
+end
+end
+
+if 1
+rows = 1:nr;
+ndx = subv2ind([nr nc], [rows(:) cols(:)]);
+M(ndx) = vals;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/axis_pct.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/axis_pct.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+function ax = axis_pct(pct)
+% AXIS_PCT Set reasonable axis limits.
+% AXIS_PCT(pct) sets axis limits to extend pct% beyond limits of plotted
+% objects. Default is 5%.
+% Works for linear or log scale.
+% Unfortunately, the axes won't change when new points are plotted.
+
+if nargin < 1
+ pct = 0.05;
+end
+ax = [Inf -Inf Inf -Inf Inf -Inf];
+
+% find bounding box of plotted objects
+children = get(gca,'children');
+for child = children'
+ if strcmp(get(child,'type'),'text')
+ xyz = get(child,'position');
+ % need to determine bounding box of the text
+ c([1 2]) = xyz(1);
+ c([3 4]) = xyz(2);
+ c([5 6]) = xyz(3);
+ else
+ x = get(child,'xdata');
+ c(1) = min(x);
+ c(2) = max(x);
+ y = get(child,'ydata');
+ c(3) = min(y);
+ c(4) = max(y);
+ z = get(child,'zdata');
+ if isempty(z)
+ c([5 6]) = 0;
+ else
+ c(5) = min(z);
+ c(6) = max(z);
+ end
+ end
+ ax([1 3 5]) = min(ax([1 3 5]), c([1 3 5]));
+ ax([2 4 6]) = max(ax([2 4 6]), c([2 4 6]));
+end
+if strcmp(get(gca,'xscale'), 'log')
+ ax([1 2]) = log(ax([1 2]));
+end
+if strcmp(get(gca,'yscale'), 'log')
+ ax([3 4]) = log(ax([3 4]));
+end
+dx = ax(2)-ax(1);
+if dx == 0
+ dx = 1;
+end
+dy = ax(4)-ax(3);
+if dy == 0
+ dy = 1;
+end
+dz = ax(6)-ax(5);
+if dz == 0
+ dz = 1;
+end
+ax = ax + [-dx dx -dy dy -dz dz]*pct;
+if strcmp(get(gca,'xscale'), 'log')
+ ax([1 2]) = exp(ax([1 2]));
+end
+if strcmp(get(gca,'yscale'), 'log')
+ ax([3 4]) = exp(ax([3 4]));
+end
+% clip for 2D
+ax = ax(1:length(axis));
+axis(ax);
+if nargout < 1
+ clear ax
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,112 @@
+% Consider matching sources to detections
+
+% s1 d2
+% s2 d3
+% d1
+
+%a = bipartiteMatchingHungarian([52;0.01])
+
+% sources(:,i) = [x y] coords
+sources = [0.1 0.7; 0.6 0.4]';
+detections = [0.2 0.2; 0.2 0.8; 0.7 0.1]';
+dst = sqdist(sources, detections);
+
+% a = [2 3] which means s1-d2, s2-d3
+a = bipartiteMatchingHungarian(dst);
+a2 = bipartiteMatchingIntProg(dst);
+assert(isequal(a(:),a2(:)))
+
+
+figure(1); clf
+bipartiteMatchingDemoPlot(sources, detections, a)
+
+
+
+
+%%%% Flip roles of sources and detections
+
+%dst = dst';
+dst = sqdist(detections, sources);
+% a = [0 1 2] which means d1-0, d2-s1, d3-s2
+a = bipartiteMatchingHungarian(dst);
+
+a2 = bipartiteMatchingIntProg(dst);
+assert(isequal(a(:),a2(:)))
+
+figure(2); clf
+bipartiteMatchingDemoPlot(detections, sources, a) % swapped args
+
+
+
+
+%%%%%%%%%% Move s1 nearer to d1
+% d2
+% s2 d3
+% s1 d1
+
+sources = [0.1 0.3; 0.6 0.4]';
+detections = [0.2 0.2; 0.2 0.8; 0.7 0.1]';
+dst = sqdist(sources, detections);
+
+% a = [2 3] which means s1-d2, s2-d3
+a = bipartiteMatchingHungarian(dst);
+[a2, ass] = bipartiteMatchingIntProg(dst);
+assert(isequal(a(:),a2(:)))
+
+
+figure(3); clf
+bipartiteMatchingDemoPlot(sources, detections, a)
+
+
+
+%%%%%%%%%%
+
+% Use random points
+
+% Generate 2D data from a mixture of 2 Gaussians (from netlab demgmm1)
+randn('state', 0); rand('state', 0);
+gmix = gmm(2, 2, 'spherical');
+ndat1 = 10; ndat2 = 10; ndata = ndat1+ndat2;
+%gmix.centres = [0.3 0.3; 0.7 0.7];
+%gmix.covars = [0.01 0.01];
+gmix.centres = [0.5 0.5; 0.5 0.5];
+gmix.covars = [0.1 0.01];
+[x, label] = gmmsamp(gmix, ndata);
+
+ndx = find(label==1);
+sources = x(ndx,:)';
+ndx = find(label==2);
+detections = x(ndx,:)';
+dst = sqdist(sources, detections);
+
+[a, ass] = bipartiteMatchingIntProg(dst);
+[a2] = bipartiteMatchingHungarian(dst);
+assert(isequal(a(:), a2(:)))
+
+figure(4); clf
+bipartiteMatchingDemoPlot(sources, detections, a)
+
+% only match 80% of points
+p1 = size(sources, 2);
+p2 = size(detections, 2);
+nmatch = ceil(0.8*min(p1,p2));
+a2 = bipartiteMatchingIntProg(dst, nmatch);
+figure(5); clf
+bipartiteMatchingDemoPlot(sources, detections, a2)
+
+
+%%% swap roles
+
+ndx = find(label==2);
+sources = x(ndx,:)';
+ndx = find(label==1);
+detections = x(ndx,:)';
+dst = sqdist(sources, detections);
+
+% only match 80% of points
+p1 = size(sources, 2);
+p2 = size(detections, 2);
+nmatch = ceil(0.8*min(p1,p2));
+a2 = bipartiteMatchingIntProg(dst, nmatch);
+figure(6); clf
+bipartiteMatchingDemoPlot(sources, detections, a2)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemoPlot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemoPlot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function bipartiteMatchingDemoPlot(sources, detections, a)
+
+hold on
+p1 = size(sources,2);
+p2 = size(detections,2);
+for i=1:p1
+ h=text(sources(1,i), sources(2,i), sprintf('s%d', i));
+ set(h, 'color', 'r');
+end
+for i=1:p2
+ h=text(detections(1,i), detections(2,i), sprintf('d%d', i));
+ set(h, 'color', 'b');
+end
+
+if nargin < 3, return; end
+
+for i=1:p1
+ j = a(i);
+ if j==0 % i not matched to anything
+ continue
+ end
+ line([sources(1,i) detections(1,j)], [sources(2,i) detections(2,j)])
+end
+axis_pct;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingHungarian.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingHungarian.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,90 @@
+% MATCH - Solves the weighted bipartite matching (or assignment)
+% problem.
+%
+% Usage: a = match(C);
+%
+% Arguments:
+% C - an m x n cost matrix; the sets are taken to be
+% 1:m and 1:n; C(i, j) gives the cost of matching
+% items i (of the first set) and j (of the second set)
+%
+% Returns:
+%
+% a - an m x 1 assignment vector, which gives the
+% minimum cost assignment. a(i) is the index of
+% the item of 1:n that was matched to item i of
+% 1:m. If item i (of 1:m) was not matched to any
+% item of 1:n, then a(i) is zero.
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [a] = optimalMatching(C)
+
+% Trivial cases:
+[p, q] = size(C);
+if (p == 0)
+ a = [];
+ return;
+elseif (q == 0)
+ a = zeros(p, 1);
+ return;
+end
+
+
+if 0
+% First, reduce the problem by making easy optimal matches. If two
+% elements agree that they are the best match, then match them up.
+[x, a] = min(C, [], 2);
+[y, b] = min(C, [], 1);
+u = find(1:p ~= b(a(:)));
+a(u) = 0;
+v = find(1:q ~= a(b(:))');
+C = C(u, v);
+if (isempty(C)) return; end
+end
+
+% Get the (new) size of the two sets, u and v.
+[m, n] = size(C);
+
+%mx = realmax;
+mx = 2*max(C(:));
+mn = -2*min(C(:));
+% Pad the affinity matrix to be square
+if (m < n)
+ C = [C; mx * ones(n - m, n)];
+elseif (n < m)
+ C = [C, mx * ones(m, m - n)];
+end
+
+% Run the Hungarian method. First replace infinite values by the
+% largest (or smallest) finite values.
+C(find(isinf(C) & (C > 0))) = mx;
+C(find(isinf(C) & (C < 0))) = mn;
+%fprintf('running hungarian\n');
+[b, cost] = hungarian(C');
+
+% Extract only the real assignments
+ap = b(1:m)';
+ap(find(ap > n)) = 0;
+
+a = ap;
+%% Incorporate this sub-assignment into the complete assignment
+% k = find(ap);
+% a(u(k)) = v(ap(k));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingIntProg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingIntProg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,69 @@
+function [a,ass] = bipartiteMatchingIntProg(dst, nmatches)
+% BIPARTITEMATCHINGINTPROG Use binary integer programming (linear objective) to solve for optimal linear assignment
+% function a = bipartiteMatchingIntProg(dst)
+% a(i) = best matching column for row i
+%
+% This gives the same result as bipartiteMatchingHungarian.
+%
+% function a = bibpartiteMatchingIntProg(dst, nmatches)
+% only matches the specified number (must be <= min(size(dst))).
+% This can be used to allow outliers in both source and target.
+%
+% For details, see Marciel & Costeira, "A global solution to sparse correspondence
+% problems", PAMI 25(2), 2003
+
+if nargin < 2, nmatches = []; end
+
+[p1 p2] = size(dst);
+p1orig = p1; p2orig = p2;
+dstorig = dst;
+
+if isempty(nmatches) % no outliers allowed (modulo size difference)
+ % ensure matrix is square
+ m = max(dst(:));
+  if p1<p2
+    dst = [dst; m*ones(p2-p1, p2)];
+  elseif p1>p2
+    dst = [dst m*ones(p1, p1-p2)];
+  end
+end
+[p1 p2] = size(dst);
+
+
+c = dst(:); % vectorize cost matrix
+
+% row-sum: ensure each column sums to 1
+A2 = kron(eye(p2), ones(1,p1));
+b2 = ones(p2,1);
+
+% col-sum: ensure each row sums to 1
+A3 = kron(ones(1,p2), eye(p1));
+b3 = ones(p1,1);
+
+if isempty(nmatches)
+ % enforce doubly stochastic
+ A = [A2; A3];
+ b = [b2; b3];
+ Aineq = zeros(1, p1*p2);
+ bineq = 0;
+else
+ nmatches = min([nmatches, p1, p2]);
+ Aineq = [A2; A3];
+ bineq = [b2; b3]; % row and col sums <= 1
+ A = ones(1,p1*p2);
+ b = nmatches; % total num matches = b (otherwise get degenerate soln)
+end
+
+
+ass = bintprog(c, Aineq, bineq, A, b);
+ass = reshape(ass, p1, p2);
+
+a = zeros(1, p1orig);
+for i=1:p1orig
+ ndx = find(ass(i,:)==1);
+ if ~isempty(ndx) & (ndx <= p2orig)
+ a(i) = ndx;
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/block.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/block.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function sub = block(blocks, block_sizes)
+% BLOCK Return a vector of subscripts corresponding to the specified blocks.
+% sub = block(blocks, block_sizes)
+%
+% e.g., block([2 5], [2 1 2 1 2]) = [3 7 8].
+
+blocks = blocks(:)';
+block_sizes = block_sizes(:)';
+skip = [0 cumsum(block_sizes)];
+start = skip(blocks)+1;
+fin = start + block_sizes(blocks) - 1;
+sub = [];
+for j=1:length(blocks)
+ sub = [sub start(j):fin(j)];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/cell2matPad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/cell2matPad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function data2 = cell2matPad(data)
+% data{f}(y,x,b) - each frame can have a different size (can can even be empty)
+% data2(y,x,b,f) = zero padded version
+
+Nframes = length(data);
+Nbands = -inf;
+nr = -inf; nc = -inf;
+for f=1:Nframes
+ if isempty(data{f}), continue; end
+ nr = max(nr, size(data{f},1));
+ nc = max(nc, size(data{f},2));
+ Nbands = max(Nbands, size(data{f},3));
+end
+data2 = zeros(nr, nc, Nbands, Nframes);
+for f=1:Nframes
+ if isempty(data{f}), continue; end
+ data2(1:size(data{f},1), 1:size(data{f},2), :, f) = data{f};
+end
+if Nbands == 1
+ data2 = squeeze(data2); % reshape(data2, [size(data2,1), size(data2,2), Nframes]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/cell2num.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/cell2num.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function N = cell2num(C)
+% CELL2NUM Convert a 2D cell array to a 2D numeric array
+% N = cell2num(C)
+% If the cells contain column vectors, they must have the same number of rows in each row of C.
+% Each column will be concatenated.
+%
+% Example 1:
+% C = num2cell(rand(2,2))
+% [0.4565] [0.8214]
+% [0.0185] [0.4447]
+% N = cell2num(C)
+% 0.4565 0.8214
+% 0.0185 0.4447
+%
+% Example 2:
+% C = cell(2, 3);
+% for i=1:2
+% for j=1:3
+% C{i,j} = rand(i, 1);
+% end
+% end
+% C =
+% [ 0.8998] [ 0.8216] [ 0.6449]
+% [2x1 double] [2x1 double] [2x1 double]
+% C{2,1} =
+% 0.8180
+% 0.6602
+% N=cell2num(C)
+% 0.8998 0.8216 0.6449
+% 0.8180 0.3420 0.3412
+% 0.6602 0.2897 0.5341
+
+
+% error('use cell2mat in matlab 7')
+
+
+if isempty(C)
+ N = [];
+ return;
+end
+
+if any(cellfun('isempty', C)) %any(isemptycell(C))
+ error('can''t convert cell array with empty cells to matrix')
+end
+
+[nrows ncols] = size(C);
+%N = reshape(cat(1, C{:}), [nrows ncols]); % this only works if C only contains scalars
+r = 0;
+for i=1:nrows
+ r = r + size(C{i,1}, 1);
+end
+c = 0;
+for j=1:ncols
+ c = c + size(C{1,j}, 2);
+end
+N = reshape(cat(1, C{:}), [r c]);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/centeringMatrix.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/centeringMatrix.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+N = 3;
+x = rand(N,2); % each row is a feature vector
+m = mean(x,1);
+xc = x-repmat(m, N, 1);
+
+C = eye(N) - (1/N)*ones(N,N);
+xc2 = C*x;
+assert(approxeq(xc, xc2))
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/checkpsd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/checkpsd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function s = checkpsd(s)
+
+if (any(isnan(s) | isinf(s) | ~isreal(s)))
+ warning('S contains complex numbers, Inf, or NaN');
+end
+% Drop any negative eigenvalues.
+[V, D] = eig(full(s));
+d = real(diag(D));
+if (any(d < 0))
+ warning(sprintf(['S is not positive semidefinite (min. eig. =' ...
+ ' %0.5g); projecting.'], min(d)));
+ d(find(d < 0)) = 0;
+ D = diag(d);
+ s = V * D * V';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/chi2inv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/chi2inv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function x = chi2inv(p,v);
+%CHI2INV Inverse of the chi-square cumulative distribution function (cdf).
+% X = CHI2INV(P,V) returns the inverse of the chi-square cdf with V
+% degrees of freedom at the values in P. The chi-square cdf with V
+% degrees of freedom, is the gamma cdf with parameters V/2 and 2.
+%
+% The size of X is the common size of P and V. A scalar input
+% functions as a constant matrix of the same size as the other input.
+
+% References:
+% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical
+% Functions", Government Printing Office, 1964, 26.4.
+% [2] E. Kreyszig, "Introductory Mathematical Statistics",
+% John Wiley, 1970, section 10.2 (page 144)
+
+% Copyright 1993-2002 The MathWorks, Inc.
+% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:30:30 $
+
+if nargin < 2,
+ error('Requires two input arguments.');
+end
+
+[errorcode p v] = distchck(2,p,v);
+
+if errorcode > 0
+ error('Requires non-scalar arguments to match in size.');
+end
+
+% Call the gamma inverse function.
+x = gaminv(p,v/2,2);
+
+% Return NaN if the degrees of freedom is not positive.
+k = (v <= 0);
+if any(k(:))
+ x(k) = NaN;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/choose.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/choose.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function c = choose(n,k)
+% CHOOSE The number of ways of choosing k things from n
+% c = choose(n,k)
+
+c = factorial(n)/(factorial(k) * factorial(n-k));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/collapse_mog.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/collapse_mog.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [new_mu, new_Sigma, new_Sigma2] = collapse_mog(mu, Sigma, coefs)
+% COLLAPSE_MOG Collapse a mixture of Gaussians to a single Gaussian by moment matching
+% [new_mu, new_Sigma] = collapse_mog(mu, Sigma, coefs)
+%
+% coefs(i) - weight of i'th mixture component
+% mu(:,i), Sigma(:,:,i) - params of i'th mixture component
+
+% S = sum_c w_c (S_c + m_c m_c' + m m' - 2 m_c m')
+% = sum_c w_c (S_c + m_c m_c') + m m' - 2 (sum_c m_c) m'
+% = sum_c w_c (S_c + m_c m_c') - m m'
+
+new_mu = sum(mu * diag(coefs), 2); % weighted sum of columns
+
+n = length(new_mu);
+new_Sigma = zeros(n,n);
+new_Sigma2 = zeros(n,n);
+for j=1:length(coefs)
+ m = mu(:,j) - new_mu;
+ new_Sigma = new_Sigma + coefs(j) * (Sigma(:,:,j) + m*m');
+ new_Sigma2 = new_Sigma2 + coefs(j) * (Sigma(:,:,j) + mu(:,j)*mu(:,j)');
+end
+%assert(approxeq(new_Sigma, new_Sigma2 - new_mu*new_mu'))
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/colmult.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/colmult.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+#include <math.h>
+#include "mex.h"
+
+/*
+out = colop(M, v)
+
+Apply binary operator to a vector v and to each column of M in turn
+to produce a matrix the same size as M.
+
+This is equivalent to
+
+out = zeros(size(M));
+for col=1:size(M,2)
+ out(:,col) = op(M(:,col), v);
+end
+
+The code needs to be modified for each different operator 'op'.
+eg op = '.*'
+
+In vectorized form:
+
+out = M .* repmat(v(:), 1, size(M,2))
+
+(This function was formerly called repmat_and_mult.c)
+
+*/
+
+/* M(i,j) = M(i + nrows*j) since Matlab uses Fortran layout. */
+
+
+#define INMAT(i,j) M[(i)+nrows*(j)]
+#define OUTMAT(i,j) out[(i)+nrows*(j)]
+
+void mexFunction(
+ int nlhs, mxArray *plhs[],
+ int nrhs, const mxArray *prhs[]
+ )
+{
+ double *out, *M, *v;
+ int nrows, ncols, r, c;
+
+ /* read the input args */
+ M = mxGetPr(prhs[0]);
+ nrows = mxGetM(prhs[0]);
+ ncols = mxGetN(prhs[0]);
+
+ v = mxGetPr(prhs[1]);
+
+ plhs[0] = mxCreateDoubleMatrix(nrows, ncols, mxREAL);
+ out = mxGetPr(plhs[0]);
+
+ for (c=0; c < ncols; c++) {
+ for (r=0; r < nrows; r++) {
+ OUTMAT(r,c) = INMAT(r,c) * v[r];
+ /* printf("r=%d, c=%d, M=%f, v=%f\n", r, c, INMAT(r,c), v[r]); */
+ }
+ }
+
+}
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/computeROC.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/computeROC.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,64 @@
+function [FPrate, TPrate, AUC, thresholds] = computeROC(confidence, testClass)
+% function [FPrate, TPrate, AUC, thresholds] = computeROC(confidence, testClass)
+%
+% computeROC computes the data for an ROC curve based on a classifier's confidence output.
+% It returns the false positive rate and the true positive rate along with
+% the area under the ROC curve, and the list of thresholds.
+%
+% Inputs:
+% - confidence(i) is proportional to the probability that
+% testClass(i) is positive
+%
+% testClass = 0 => target absent
+% testClass = 1 => target present
+%
+% Based on algorithms 2 and 4 from Tom Fawcett's paper "ROC Graphs: Notes and
+% Practical Considerations for Data Mining Researchers" (2003)
+% http://www.hpl.hp.com/techreports/2003/HPL-2003-4.pdf"
+%
+% Vlad Magdin, 21 Feb 2005
+
+% break ties in scores
+S = rand('state');
+rand('state',0);
+confidence = confidence + rand(size(confidence))*10^(-10);
+rand('state',S)
+[thresholds order] = sort(confidence, 'descend');
+testClass = testClass(order);
+
+%%% -- calculate TP/FP rates and totals -- %%%
+AUC = 0;
+faCnt = 0;
+tpCnt = 0;
+falseAlarms = zeros(1,size(thresholds,2));
+detections = zeros(1,size(thresholds,2));
+fPrev = -inf;
+faPrev = 0;
+tpPrev = 0;
+
+P = max(size(find(testClass==1)));
+N = max(size(find(testClass==0)));
+
+for i=1:length(thresholds)
+ if thresholds(i) ~= fPrev
+ falseAlarms(i) = faCnt;
+ detections(i) = tpCnt;
+
+ AUC = AUC + polyarea([faPrev faPrev faCnt/N faCnt/N],[0 tpPrev tpCnt/P 0]);
+
+ fPrev = thresholds(i);
+ faPrev = faCnt/N;
+ tpPrev = tpCnt/P;
+ end
+
+ if testClass(i) == 1
+ tpCnt = tpCnt + 1;
+ else
+ faCnt = faCnt + 1;
+ end
+end
+
+AUC = AUC + polyarea([faPrev faPrev 1 1],[0 tpPrev 1 0]);
+
+FPrate = falseAlarms/N;
+TPrate = detections/P;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/compute_counts.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/compute_counts.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function count = compute_counts(data, sz)
+% COMPUTE_COUNTS Count the number of times each combination of discrete assignments occurs
+% count = compute_counts(data, sz)
+%
+% data(i,t) is the value of variable i in case t
+% sz(i) : values for variable i are assumed to be in [1:sz(i)]
+%
+% Example: to compute a transition matrix for an HMM from a sequence of labeled states:
+% transmat = mk_stochastic(compute_counts([seq(1:end-1); seq(2:end)], [nstates nstates]));
+
+assert(length(sz) == size(data, 1));
+P = prod(sz);
+indices = subv2ind(sz, data'); % each row of data' is a case
+%count = histc(indices, 1:P);
+count = hist(indices, 1:P);
+count = myreshape(count, sz);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/conf2mahal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/conf2mahal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+% CONF2MAHAL - Translates a confidence interval to a Mahalanobis
+% distance. Consider a multivariate Gaussian
+% distribution of the form
+%
+% p(x) = 1/sqrt((2 * pi)^d * det(C)) * exp((-1/2) * MD(x, m, inv(C)))
+%
+% where MD(x, m, P) is the Mahalanobis distance from x
+% to m under P:
+%
+% MD(x, m, P) = (x - m) * P * (x - m)'
+%
+% A particular Mahalanobis distance k identifies an
+% ellipsoid centered at the mean of the distribution.
+% The confidence interval associated with this ellipsoid
+% is the probability mass enclosed by it. Similarly,
+% a particular confidence interval uniquely determines
+% an ellipsoid with a fixed Mahalanobis distance.
+%
+% If X is an d dimensional Gaussian-distributed vector,
+% then the Mahalanobis distance of X is distributed
+% according to the Chi-squared distribution with d
+% degrees of freedom. Thus, the Mahalanobis distance is
+% determined by evaluating the inverse cumulative
+% distribution function of the chi squared distribution
+% up to the confidence value.
+%
+% Usage:
+%
+% m = conf2mahal(c, d);
+%
+% Inputs:
+%
+% c - the confidence interval
+% d - the number of dimensions of the Gaussian distribution
+%
+% Outputs:
+%
+% m - the Mahalanobis radius of the ellipsoid enclosing the
+% fraction c of the distribution's probability mass
+%
+% See also: MAHAL2CONF
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function m = conf2mahal(c, d)
+
+m = chi2inv(c, d);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/cross_entropy.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/cross_entropy.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function kl = cross_entropy(p, q, symmetric)
+% CROSS_ENTROPY Compute the Kullback-Leibler divergence between two discrete prob. distributions
+% kl = cross_entropy(p, q, symmetric)
+%
+% If symmetric = 1, we compute the symmetric version. Default: symmetric = 0;
+
+tiny = exp(-700);
+if nargin < 3, symmetric = 0; end
+p = p(:);
+q = q(:);
+if symmetric
+ kl = (sum(p .* log((p+tiny)./(q+tiny))) + sum(q .* log((q+tiny)./(p+tiny))))/2;
+else
+ kl = sum(p .* log((p+tiny)./(q+tiny)));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/dirKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/dirKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,98 @@
+function filenames = dirKPM(dirname, ext, varargin)
+% dirKPM Like the built-in dir command, but returns filenames as a cell array instead of a struct
+%
+% filenames = dirKPM(dirname)
+% returns all files, except '.' and '..'
+%
+% filenames = dirKPM('images', '*.jpg')
+% returns files with this extension
+% eg filenames{1} = 'foo.jpg' etc
+%
+% OPTIONAL ARGUMENTS [default in brackets]
+% filenames = dirKPM('images', '', param1, val1, param2, val2, ...)
+%
+% 'fileType'='image' ['all'] means return files with extension .jpg, .png, .bmp
+%
+% 'prepend'=1 [0] means preprend folder name to filename
+% eg filenames{1} = 'images/foo.jpg'
+%
+% 'doSort'=1 [1] means sort filenames in ascending alphanumerical order (where possible)
+%
+% 'doRecurse'=1 [0] recursive dir, apply the same dirKPM call on all
+% subfolders (decrease MAXDEPTH option to prevent recursion from branching
+% too explosively)
+
+if nargin < 1, dirname = '.'; end
+
+if nargin < 2, ext = ''; end
+
+[fileType, prepend, doSort, doRecurse, MAXDEPTH, DEPTH] = process_options(...
+ varargin, 'fileType', 'all', 'prepend', 0, 'doSort', 1, 'doRecurse', 0,...
+ 'MAXDEPTH', 3, 'DEPTH', 0);
+
+tmp = dir(fullfile(dirname, ext));
+[filenames I] = setdiff({tmp.name}, {'.', '..'});
+tmp = tmp(I);
+
+% NOTE(review): span below reconstructed from a tag-stripped diff -- verify against upstream KPMtools dirKPM.m
+if doRecurse && sum([tmp.isdir])>0 && DEPTH<MAXDEPTH
+ for fi=1:length(tmp)
+  subDirFilenames = {};
+  if tmp(fi).isdir
+   subDirFilenames = dirKPM(fullfile(dirname, tmp(fi).name), ext, varargin{:}, 'DEPTH', DEPTH+1);
+  end
+  nfilenames = length(filenames);
+  if length(subDirFilenames)>0
+ filenames(nfilenames+1:nfilenames+length(subDirFilenames)) = subDirFilenames;
+ end
+ end
+end
+
+nfiles = length(filenames);
+if nfiles==0 return; end
+
+switch fileType
+ case 'image',
+ for fi=1:nfiles
+ good(fi) = isImage(filenames{fi});
+ end
+ filenames = filenames(find(good));
+ case 'all',
+ % no-op
+ otherwise
+ error(sprintf('unrecognized file type %s', fileType));
+end
+
+if doSort
+% % sort filenames alphanumerically (if possible)
+% DJE, buggy, MUST save tmp.anr/snr/str or else we potentially lose
+% filenames
+% tmp = asort(filenames, '-s', 'ascend');
+% if ~isempty(tmp.anr)
+% filenames = tmp.anr';
+% else
+% filenames = tmp.str';
+% end
+ % if names could not be sorted, return original order
+
+ filenames=sort(filenames);
+
+end
+
+
+if prepend
+ nfiles = length(filenames);
+ for fi=1:nfiles
+ filenames{fi} = fullfile(dirname, filenames{fi});
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/div.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/div.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function d = div(a,b)
+% DIV Integer division
+% d = div(a,b)
+
+d = floor(a / b);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/draw_circle.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/draw_circle.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function h = draw_circle(x, r, outline_color, fill_color)
+% draw filled circles at centers x with radii r.
+% x is a matrix of columns. r is a row vector.
+
+n = 40; % resolution
+radians = [0:(2*pi)/(n-1):2*pi];
+unitC = [sin(radians); cos(radians)];
+
+% extend r if necessary
+if length(r) < cols(x)
+ r = [r repmat(r(length(r)), 1, cols(x)-length(r))];
+end
+
+h = [];
+% hold is needed for fill()
+held = ishold;
+hold on
+for i=1:cols(x)
+ y = unitC*r(i) + repmat(x(:, i), 1, n);
+ if nargin < 4
+ h = [h line(y(1,:), y(2,:), 'Color', outline_color)];
+ else
+ h = [h fill(y(1,:), y(2,:), fill_color, 'EdgeColor', outline_color)];
+ end
+end
+if ~held
+ hold off
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function h = draw_ellipse(x, c, outline_color, fill_color)
+% DRAW_ELLIPSE(x, c, outline_color, fill_color)
+% Draws ellipses at centers x with covariance matrix c.
+% x is a matrix of columns. c is a positive definite matrix.
+% outline_color and fill_color are optional.
+
+n = 40; % resolution
+radians = [0:(2*pi)/(n-1):2*pi];
+unitC = [sin(radians); cos(radians)];
+r = chol(c)';
+
+if nargin < 3
+ outline_color = 'g';
+end
+
+h = [];
+for i=1:cols(x)
+ y = r*unitC + repmat(x(:, i), 1, n);
+ if nargin < 4
+ h = [h line(y(1,:), y(2,:), 'Color', outline_color)];
+ else
+ h = [h fill(y(1,:), y(2,:), fill_color, 'EdgeColor', outline_color)];
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse_axes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse_axes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function h = draw_ellipse_axes(x, c, linespec)
+% DRAW_ELLIPSE_AXES(x, c, linespec)
+% Draws the major and minor axes of ellipses.
+% Ellipses are centered at x with covariance matrix c.
+% x is a matrix of columns. c is a positive definite matrix.
+% linespec is optional.
+
+[v,e] = eig(c);
+v = v*sqrt(e);
+
+h = [];
+for j = 1:cols(v)
+ x1 = repmat(x(1,:),2,1) + repmat([-1;1]*v(1,j),1,cols(x));
+ x2 = repmat(x(2,:),2,1) + repmat([-1;1]*v(2,j),1,cols(x));
+ h = [h line(x1,x2)];
+end
+if nargin > 2
+ set_linespec(h,linespec);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/em_converged.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/em_converged.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function [converged, decrease] = em_converged(loglik, previous_loglik, threshold, check_increased)
+% EM_CONVERGED Has EM converged?
+% [converged, decrease] = em_converged(loglik, previous_loglik, threshold)
+%
+% We have converged if the slope of the log-likelihood function falls below 'threshold',
+% i.e., |f(t) - f(t-1)| / avg < threshold,
+% where avg = (|f(t)| + |f(t-1)|)/2 and f(t) is log lik at iteration t.
+% 'threshold' defaults to 1e-4.
+%
+% This stopping criterion is from Numerical Recipes in C p423
+%
+% If we are doing MAP estimation (using priors), the likelihood can decrease,
+% even though the mode of the posterior is increasing.
+
+if nargin < 3, threshold = 1e-4; end
+if nargin < 4, check_increased = 1; end
+
+converged = 0;
+decrease = 0;
+
+if check_increased
+ if loglik - previous_loglik < -1e-3 % allow for a little imprecision
+ fprintf(1, '******likelihood decreased from %6.4f to %6.4f!\n', previous_loglik, loglik);
+ decrease = 1;
+converged = 0;
+return;
+ end
+end
+
+delta_loglik = abs(loglik - previous_loglik);
+avg_loglik = (abs(loglik) + abs(previous_loglik) + eps)/2;
+if (delta_loglik / avg_loglik) < threshold, converged = 1; end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/entropy.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/entropy.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function H = entropy(v, scale)
+% ENTROPY Entropy log base 2
+% H = entropy(v)
+% If v is a matrix, we compute the entropy of each column
+%
+% H = entropy(v,1) means we scale the result so that it lies in [0,1]
+
+if nargin < 2, scale = 0; end
+
+v = v + (v==0);
+H = -1 * sum(v .* log2(v), 1); % sum the rows
+
+if scale
+ n = size(v, 1);
+ unif = normalise(ones(n,1));
+ H = H / entropy(unif);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/exportfig.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/exportfig.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,991 @@
+function varargout = exportfig(varargin)
+%EXPORTFIG Export a figure.
+% EXPORTFIG(H, FILENAME) writes the figure H to FILENAME. H is
+% a figure handle and FILENAME is a string that specifies the
+% name of the output file.
+%
+% EXPORTFIG(H, FILENAME, OPTIONS) writes the figure H to FILENAME
+% with options initially specified by the structure OPTIONS. The
+% field names of OPTIONS must be legal parameters listed below
+% and the field values must be legal values for the corresponding
+% parameter. Default options can be set in releases prior to R12
+% by storing the OPTIONS structure in the root object's appdata
+% with the command
+% setappdata(0,'exportfigdefaults', OPTIONS)
+% and for releases after R12 by setting the preference with the
+% command
+% setpref('exportfig', 'defaults', OPTIONS)
+%
+% EXPORTFIG(...,PARAM1,VAL1,PARAM2,VAL2,...) specifies
+% parameters that control various characteristics of the output
+% file. Any parameter value can be the string 'auto' which means
+% the parameter uses the default factory behavior, overriding
+% any other default for the parameter.
+%
+% Format Parameter:
+% 'Format' a string
+% specifies the output format. Defaults to 'eps'. For a
+% list of export formats type 'help print'.
+% 'Preview' one of the strings 'none', 'tiff'
+% specifies a preview for EPS files. Defaults to 'none'.
+%
+% Size Parameters:
+% 'Width' a positive scalar
+% specifies the width in the figure's PaperUnits
+% 'Height' a positive scalar
+% specifies the height in the figure's PaperUnits
+% 'Bounds' one of the strings 'tight', 'loose'
+% specifies a tight or loose bounding box. Defaults to 'tight'.
+% 'Reference' an axes handle or a string
+% specifies that the width and height parameters
+% are relative to the given axes. If a string is
+% specified then it must evaluate to an axes handle.
+%
+% Specifying only one dimension sets the other dimension
+% so that the exported aspect ratio is the same as the
+% figure's or reference axes' current aspect ratio.
+% If neither dimension is specified the size defaults to
+% the width and height from the figure's or reference
+% axes' size. Tight bounding boxes are only computed for
+% 2-D views and in that case the computed bounds enclose all
+% text objects.
+%
+% Rendering Parameters:
+% 'Color' one of the strings 'bw', 'gray', 'cmyk'
+% 'bw' specifies that lines and text are exported in
+% black and all other objects in grayscale
+% 'gray' specifies that all objects are exported in grayscale
+% 'rgb' specifies that all objects are exported in color
+% using the RGB color space
+% 'cmyk' specifies that all objects are exported in color
+% using the CMYK color space
+% 'Renderer' one of 'painters', 'zbuffer', 'opengl'
+% specifies the renderer to use
+% 'Resolution' a positive scalar
+% specifies the resolution in dots-per-inch.
+% 'LockAxes' one of 0 or 1
+% specifies that all axes limits and ticks should be fixed
+% while exporting.
+%
+% The default color setting is 'bw'.
+%
+% Font Parameters:
+% 'FontMode' one of the strings 'scaled', 'fixed'
+% 'FontSize' a positive scalar
+% in 'scaled' mode multiplies with the font size of each
+% text object to obtain the exported font size
+% in 'fixed' mode specifies the font size of all text
+% objects in points
+% 'DefaultFixedFontSize' a positive scalar
+% in 'fixed' mode specifies the default font size in
+% points
+% 'FontSizeMin' a positive scalar
+% specifies the minimum font size allowed after scaling
+% 'FontSizeMax' a positive scalar
+% specifies the maximum font size allowed after scaling
+% 'FontEncoding' one of the strings 'latin1', 'adobe'
+% specifies the character encoding of the font
+% 'SeparateText' one of 0 or 1
+% specifies that the text objects are stored in separate
+% file as EPS with the base filename having '_t' appended.
+%
+% If FontMode is 'scaled' but FontSize is not specified then a
+% scaling factor is computed from the ratio of the size of the
+% exported figure to the size of the actual figure.
+%
+% The default 'FontMode' setting is 'scaled'.
+%
+% Line Width Parameters:
+% 'LineMode' one of the strings 'scaled', 'fixed'
+% 'LineWidth' a positive scalar
+% 'DefaultFixedLineWidth' a positive scalar
+% 'LineWidthMin' a positive scalar
+% specifies the minimum line width allowed after scaling
+% 'LineWidthMax' a positive scalar
+% specifies the maximum line width allowed after scaling
+% The semantics of 'Line' parameters are exactly the
+% same as the corresponding 'Font' parameters, except that
+% they apply to line widths instead of font sizes.
+%
+% Style Map Parameter:
+% 'LineStyleMap' one of [], 'bw', or a function name or handle
+% specifies how to map line colors to styles. An empty
+% style map means styles are not changed. The style map
+% 'bw' is a built-in mapping that maps lines with the same
+% color to the same style and otherwise cycles through the
+% available styles. A user-specified map is a function
+% that takes as input a cell array of line objects and
+% outputs a cell array of line style strings. The default
+% map is [].
+%
+% Examples:
+% exportfig(gcf,'fig1.eps','height',3);
+% Exports the current figure to the file named 'fig1.eps' with
+% a height of 3 inches (assuming the figure's PaperUnits is
+% inches) and an aspect ratio the same as the figure's aspect
+% ratio on screen.
+%
+% opts = struct('FontMode','fixed','FontSize',10,'height',3);
+% exportfig(gcf, 'fig2.eps', opts, 'height', 5);
+% Exports the current figure to 'fig2.eps' with all
+% text in 10 point fonts and with height 5 inches.
+%
+% See also PREVIEWFIG, APPLYTOFIG, RESTOREFIG, PRINT.
+
+% Copyright 2000 Ben Hinkle
+% Email bug reports and comments to bhinkle@mathworks.com
+
+if (nargin < 2)
+ error('Too few input arguments');
+end
+
+% exportfig(H, filename, [options,] ...)
+H = varargin{1};
+if ~LocalIsHG(H,'figure')
+ error('First argument must be a handle to a figure.');
+end
+filename = varargin{2};
+if ~ischar(filename)
+ error('Second argument must be a string.');
+end
+paramPairs = {varargin{3:end}};
+if nargin > 2
+ if isstruct(paramPairs{1})
+ pcell = LocalToCell(paramPairs{1});
+ paramPairs = {pcell{:}, paramPairs{2:end}};
+ end
+end
+verstr = version;
+majorver = str2num(verstr(1));
+defaults = [];
+if majorver > 5
+ if ispref('exportfig','defaults')
+ defaults = getpref('exportfig','defaults');
+ end
+elseif exist('getappdata')
+ defaults = getappdata(0,'exportfigdefaults');
+end
+if ~isempty(defaults)
+ dcell = LocalToCell(defaults);
+ paramPairs = {dcell{:}, paramPairs{:}};
+end
+
+% Do some validity checking on param-value pairs
+if (rem(length(paramPairs),2) ~= 0)
+ error(['Invalid input syntax. Optional parameters and values' ...
+ ' must be in pairs.']);
+end
+
+auto.format = 'eps';
+auto.preview = 'none';
+auto.width = -1;
+auto.height = -1;
+auto.color = 'bw';
+auto.defaultfontsize=10;
+auto.fontsize = -1;
+auto.fontmode='scaled';
+auto.fontmin = 8;
+auto.fontmax = 60;
+auto.defaultlinewidth = 1.0;
+auto.linewidth = -1;
+auto.linemode=[];
+auto.linemin = 0.5;
+auto.linemax = 100;
+auto.fontencoding = 'latin1';
+auto.renderer = [];
+auto.resolution = [];
+auto.stylemap = [];
+auto.applystyle = 0;
+auto.refobj = -1;
+auto.bounds = 'tight';
+explicitbounds = 0;
+auto.lockaxes = 1;
+auto.separatetext = 0;
+opts = auto;
+
+% Process param-value pairs
+args = {};
+for k = 1:2:length(paramPairs)
+ param = lower(paramPairs{k});
+ if ~ischar(param)
+ error('Optional parameter names must be strings');
+ end
+ value = paramPairs{k+1};
+
+ switch (param)
+ case 'format'
+ opts.format = LocalCheckAuto(lower(value),auto.format);
+ if strcmp(opts.format,'preview')
+ error(['Format ''preview'' no longer supported. Use PREVIEWFIG' ...
+ ' instead.']);
+ end
+ case 'preview'
+ opts.preview = LocalCheckAuto(lower(value),auto.preview);
+ if ~strcmp(opts.preview,{'none','tiff'})
+ error('Preview must be ''none'' or ''tiff''.');
+ end
+ case 'width'
+ opts.width = LocalToNum(value, auto.width);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.width)
+ error('Width must be a numeric scalar > 0');
+ end
+ end
+ case 'height'
+ opts.height = LocalToNum(value, auto.height);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if(~LocalIsPositiveScalar(opts.height))
+ error('Height must be a numeric scalar > 0');
+ end
+ end
+ case 'color'
+ opts.color = LocalCheckAuto(lower(value),auto.color);
+ if ~strcmp(opts.color,{'bw','gray','rgb','cmyk'})
+ error('Color must be ''bw'', ''gray'',''rgb'' or ''cmyk''.');
+ end
+ case 'fontmode'
+ opts.fontmode = LocalCheckAuto(lower(value),auto.fontmode);
+ if ~strcmp(opts.fontmode,{'scaled','fixed'})
+ error('FontMode must be ''scaled'' or ''fixed''.');
+ end
+ case 'fontsize'
+ opts.fontsize = LocalToNum(value,auto.fontsize);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.fontsize)
+ error('FontSize must be a numeric scalar > 0');
+ end
+ end
+ case 'defaultfixedfontsize'
+ opts.defaultfontsize = LocalToNum(value,auto.defaultfontsize);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.defaultfontsize)
+ error('DefaultFixedFontSize must be a numeric scalar > 0');
+ end
+ end
+ case 'fontsizemin'
+ opts.fontmin = LocalToNum(value,auto.fontmin);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.fontmin)
+ error('FontSizeMin must be a numeric scalar > 0');
+ end
+ end
+ case 'fontsizemax'
+ opts.fontmax = LocalToNum(value,auto.fontmax);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.fontmax)
+ error('FontSizeMax must be a numeric scalar > 0');
+ end
+ end
+ case 'fontencoding'
+ opts.fontencoding = LocalCheckAuto(lower(value),auto.fontencoding);
+ if ~strcmp(opts.fontencoding,{'latin1','adobe'})
+ error('FontEncoding must be ''latin1'' or ''adobe''.');
+ end
+ case 'linemode'
+ opts.linemode = LocalCheckAuto(lower(value),auto.linemode);
+ if ~strcmp(opts.linemode,{'scaled','fixed'})
+ error('LineMode must be ''scaled'' or ''fixed''.');
+ end
+ case 'linewidth'
+ opts.linewidth = LocalToNum(value,auto.linewidth);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.linewidth)
+ error('LineWidth must be a numeric scalar > 0');
+ end
+ end
+ case 'defaultfixedlinewidth'
+ opts.defaultlinewidth = LocalToNum(value,auto.defaultlinewidth);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.defaultlinewidth)
+ error(['DefaultFixedLineWidth must be a numeric scalar >' ...
+ ' 0']);
+ end
+ end
+ case 'linewidthmin'
+ opts.linemin = LocalToNum(value,auto.linemin);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.linemin)
+ error('LineWidthMin must be a numeric scalar > 0');
+ end
+ end
+ case 'linewidthmax'
+ opts.linemax = LocalToNum(value,auto.linemax);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~LocalIsPositiveScalar(opts.linemax)
+ error('LineWidthMax must be a numeric scalar > 0');
+ end
+ end
+ case 'linestylemap'
+ opts.stylemap = LocalCheckAuto(value,auto.stylemap);
+ case 'renderer'
+ opts.renderer = LocalCheckAuto(lower(value),auto.renderer);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~strcmp(opts.renderer,{'painters','zbuffer','opengl'})
+ error(['Renderer must be ''painters'', ''zbuffer'' or' ...
+ ' ''opengl''.']);
+ end
+ end
+ case 'resolution'
+ opts.resolution = LocalToNum(value,auto.resolution);
+ if ~ischar(value) | ~strcmp(value,'auto')
+ if ~(isnumeric(value) & (prod(size(value)) == 1) & (value >= 0));
+ error('Resolution must be a numeric scalar >= 0');
+ end
+ end
+ case 'applystyle' % means to apply the options and not export
+ opts.applystyle = 1;
+ case 'reference'
+ if ischar(value)
+ if strcmp(value,'auto')
+ opts.refobj = auto.refobj;
+ else
+ opts.refobj = eval(value);
+ end
+ else
+ opts.refobj = value;
+ end
+ if ~LocalIsHG(opts.refobj,'axes')
+ error('Reference object must evaluate to an axes handle.');
+ end
+ case 'bounds'
+ opts.bounds = LocalCheckAuto(lower(value),auto.bounds);
+ explicitbounds = 1;
+ if ~strcmp(opts.bounds,{'tight','loose'})
+ error('Bounds must be ''tight'' or ''loose''.');
+ end
+ case 'lockaxes'
+ opts.lockaxes = LocalToNum(value,auto.lockaxes);
+ case 'separatetext'
+ opts.separatetext = LocalToNum(value,auto.separatetext);
+ otherwise
+ error(['Unrecognized option ' param '.']);
+ end
+end
+
+% make sure figure is up-to-date
+drawnow;
+
+allLines = findall(H, 'type', 'line');
+allText = findall(H, 'type', 'text');
+allAxes = findall(H, 'type', 'axes');
+allImages = findall(H, 'type', 'image');
+allLights = findall(H, 'type', 'light');
+allPatch = findall(H, 'type', 'patch');
+allSurf = findall(H, 'type', 'surface');
+allRect = findall(H, 'type', 'rectangle');
+allFont = [allText; allAxes];
+allColor = [allLines; allText; allAxes; allLights];
+allMarker = [allLines; allPatch; allSurf];
+allEdge = [allPatch; allSurf];
+allCData = [allImages; allPatch; allSurf];
+
+old.objs = {};
+old.prop = {};
+old.values = {};
+
+% Process format
+if strncmp(opts.format,'eps',3) & ~strcmp(opts.preview,'none')
+ args = {args{:}, ['-' opts.preview]};
+end
+
+hadError = 0;
+oldwarn = warning;
+try
+
+ % lock axes limits, ticks and labels if requested
+ if opts.lockaxes
+ old = LocalManualAxesMode(old, allAxes, 'TickMode');
+ old = LocalManualAxesMode(old, allAxes, 'TickLabelMode');
+ old = LocalManualAxesMode(old, allAxes, 'LimMode');
+ end
+
+ % Process size parameters
+ figurePaperUnits = get(H, 'PaperUnits');
+ oldFigureUnits = get(H, 'Units');
+ oldFigPos = get(H,'Position');
+ set(H, 'Units', figurePaperUnits);
+ figPos = get(H,'Position');
+ refsize = figPos(3:4);
+ if opts.refobj ~= -1
+ oldUnits = get(opts.refobj, 'Units');
+ set(opts.refobj, 'Units', figurePaperUnits);
+ r = get(opts.refobj, 'Position');
+ refsize = r(3:4);
+ set(opts.refobj, 'Units', oldUnits);
+ end
+ aspectRatio = refsize(1)/refsize(2);
+ if (opts.width == -1) & (opts.height == -1)
+ opts.width = refsize(1);
+ opts.height = refsize(2);
+ elseif (opts.width == -1)
+ opts.width = opts.height * aspectRatio;
+ elseif (opts.height == -1)
+ opts.height = opts.width / aspectRatio;
+ end
+ wscale = opts.width/refsize(1);
+ hscale = opts.height/refsize(2);
+ sizescale = min(wscale,hscale);
+ old = LocalPushOldData(old,H,'PaperPositionMode', ...
+ get(H,'PaperPositionMode'));
+ set(H, 'PaperPositionMode', 'auto');
+ newPos = [figPos(1) figPos(2)+figPos(4)*(1-hscale) ...
+ wscale*figPos(3) hscale*figPos(4)];
+ set(H, 'Position', newPos);
+ set(H, 'Units', oldFigureUnits);
+
+ % process line-style map
+ if ~isempty(opts.stylemap) & ~isempty(allLines)
+ oldlstyle = LocalGetAsCell(allLines,'LineStyle');
+ old = LocalPushOldData(old, allLines, {'LineStyle'}, ...
+ oldlstyle);
+ newlstyle = oldlstyle;
+ if ischar(opts.stylemap) & strcmpi(opts.stylemap,'bw')
+ newlstyle = LocalMapColorToStyle(allLines);
+ else
+ try
+ newlstyle = feval(opts.stylemap,allLines);
+ catch
+ warning(['Skipping stylemap. ' lasterr]);
+ end
+ end
+ set(allLines,{'LineStyle'},newlstyle);
+ end
+
+ % Process rendering parameters
+ switch (opts.color)
+ case {'bw', 'gray'}
+ if ~strcmp(opts.color,'bw') & strncmp(opts.format,'eps',3)
+ opts.format = [opts.format 'c'];
+ end
+ args = {args{:}, ['-d' opts.format]};
+
+ %compute and set gray colormap
+ oldcmap = get(H,'Colormap');
+ newgrays = 0.30*oldcmap(:,1) + 0.59*oldcmap(:,2) + 0.11*oldcmap(:,3);
+ newcmap = [newgrays newgrays newgrays];
+ old = LocalPushOldData(old, H, 'Colormap', oldcmap);
+ set(H, 'Colormap', newcmap);
+
+ %compute and set ColorSpec and CData properties
+ old = LocalUpdateColors(allColor, 'color', old);
+ old = LocalUpdateColors(allAxes, 'xcolor', old);
+ old = LocalUpdateColors(allAxes, 'ycolor', old);
+ old = LocalUpdateColors(allAxes, 'zcolor', old);
+ old = LocalUpdateColors(allMarker, 'MarkerEdgeColor', old);
+ old = LocalUpdateColors(allMarker, 'MarkerFaceColor', old);
+ old = LocalUpdateColors(allEdge, 'EdgeColor', old);
+ old = LocalUpdateColors(allEdge, 'FaceColor', old);
+ old = LocalUpdateColors(allCData, 'CData', old);
+
+ case {'rgb','cmyk'}
+ if strncmp(opts.format,'eps',3)
+ opts.format = [opts.format 'c'];
+ args = {args{:}, ['-d' opts.format]};
+ if strcmp(opts.color,'cmyk')
+ args = {args{:}, '-cmyk'};
+ end
+ else
+ args = {args{:}, ['-d' opts.format]};
+ end
+ otherwise
+ error('Invalid Color parameter');
+ end
+ if (~isempty(opts.renderer))
+ args = {args{:}, ['-' opts.renderer]};
+ end
+ if (~isempty(opts.resolution)) | ~strncmp(opts.format,'eps',3)
+ if isempty(opts.resolution)
+ opts.resolution = 0;
+ end
+ args = {args{:}, ['-r' int2str(opts.resolution)]};
+ end
+
+ % Process font parameters
+ if ~isempty(opts.fontmode)
+ oldfonts = LocalGetAsCell(allFont,'FontSize');
+ oldfontunits = LocalGetAsCell(allFont,'FontUnits');
+ set(allFont,'FontUnits','points');
+ switch (opts.fontmode)
+ case 'fixed'
+ if (opts.fontsize == -1)
+ set(allFont,'FontSize',opts.defaultfontsize);
+ else
+ set(allFont,'FontSize',opts.fontsize);
+ end
+ case 'scaled'
+ if (opts.fontsize == -1)
+ scale = sizescale;
+ else
+ scale = opts.fontsize;
+ end
+ newfonts = LocalScale(oldfonts,scale,opts.fontmin,opts.fontmax);
+ set(allFont,{'FontSize'},newfonts);
+ otherwise
+ error('Invalid FontMode parameter');
+ end
+ old = LocalPushOldData(old, allFont, {'FontSize'}, oldfonts);
+ old = LocalPushOldData(old, allFont, {'FontUnits'}, oldfontunits);
+ end
+ if strcmp(opts.fontencoding,'adobe') & strncmp(opts.format,'eps',3)
+ args = {args{:}, '-adobecset'};
+ end
+
+ % Process line parameters
+ if ~isempty(opts.linemode)
+ oldlines = LocalGetAsCell(allMarker,'LineWidth');
+ old = LocalPushOldData(old, allMarker, {'LineWidth'}, oldlines);
+ switch (opts.linemode)
+ case 'fixed'
+ if (opts.linewidth == -1)
+ set(allMarker,'LineWidth',opts.defaultlinewidth);
+ else
+ set(allMarker,'LineWidth',opts.linewidth);
+ end
+ case 'scaled'
+ if (opts.linewidth == -1)
+ scale = sizescale;
+ else
+ scale = opts.linewidth;
+ end
+ newlines = LocalScale(oldlines, scale, opts.linemin, opts.linemax);
+ set(allMarker,{'LineWidth'},newlines);
+ end
+ end
+
+ % adjust figure bounds to surround axes
+ if strcmp(opts.bounds,'tight')
+ if (~strncmp(opts.format,'eps',3) & LocalHas3DPlot(allAxes)) | ...
+ (strncmp(opts.format,'eps',3) & opts.separatetext)
+ if (explicitbounds == 1)
+ warning(['Cannot compute ''tight'' bounds. Using ''loose''' ...
+ ' bounds.']);
+ end
+ opts.bounds = 'loose';
+ end
+ end
+ warning('off');
+ if ~isempty(allAxes)
+ if strncmp(opts.format,'eps',3)
+ if strcmp(opts.bounds,'loose')
+ args = {args{:}, '-loose'};
+ end
+ old = LocalPushOldData(old,H,'Position', oldFigPos);
+ elseif strcmp(opts.bounds,'tight')
+ oldaunits = LocalGetAsCell(allAxes,'Units');
+ oldapos = LocalGetAsCell(allAxes,'Position');
+ oldtunits = LocalGetAsCell(allText,'units');
+ oldtpos = LocalGetAsCell(allText,'Position');
+ set(allAxes,'units','points');
+ apos = LocalGetAsCell(allAxes,'Position');
+ oldunits = get(H,'Units');
+ set(H,'units','points');
+ origfr = get(H,'position');
+ fr = [];
+ for k=1:length(allAxes)
+ if ~strcmpi(get(allAxes(k),'Tag'),'legend')
+ axesR = apos{k};
+ r = LocalAxesTightBoundingBox(axesR, allAxes(k));
+ r(1:2) = r(1:2) + axesR(1:2);
+ fr = LocalUnionRect(fr,r);
+ end
+ end
+ if isempty(fr)
+ fr = [0 0 origfr(3:4)];
+ end
+ for k=1:length(allAxes)
+ ax = allAxes(k);
+ r = apos{k};
+ r(1:2) = r(1:2) - fr(1:2);
+ set(ax,'Position',r);
+ end
+ old = LocalPushOldData(old, allAxes, {'Position'}, oldapos);
+ old = LocalPushOldData(old, allText, {'Position'}, oldtpos);
+ old = LocalPushOldData(old, allText, {'Units'}, oldtunits);
+ old = LocalPushOldData(old, allAxes, {'Units'}, oldaunits);
+ old = LocalPushOldData(old, H, 'Position', oldFigPos);
+ old = LocalPushOldData(old, H, 'Units', oldFigureUnits);
+ r = [origfr(1) origfr(2)+origfr(4)-fr(4) fr(3:4)];
+ set(H,'Position',r);
+ else
+ args = {args{:}, '-loose'};
+ old = LocalPushOldData(old,H,'Position', oldFigPos);
+ end
+ end
+
+ % Process text in a separate file if needed
+ if opts.separatetext & ~opts.applystyle
+ % First hide all text and export
+ oldtvis = LocalGetAsCell(allText,'visible');
+ set(allText,'visible','off');
+ oldax = LocalGetAsCell(allAxes,'XTickLabel',1);
+ olday = LocalGetAsCell(allAxes,'YTickLabel',1);
+ oldaz = LocalGetAsCell(allAxes,'ZTickLabel',1);
+ null = cell(length(oldax),1);
+ [null{:}] = deal([]);
+ set(allAxes,{'XTickLabel'},null);
+ set(allAxes,{'YTickLabel'},null);
+ set(allAxes,{'ZTickLabel'},null);
+ print(H, filename, args{:});
+ set(allText,{'Visible'},oldtvis);
+ set(allAxes,{'XTickLabel'},oldax);
+ set(allAxes,{'YTickLabel'},olday);
+ set(allAxes,{'ZTickLabel'},oldaz);
+ % Now hide all non-text and export as eps in painters
+ [path, name, ext] = fileparts(filename);
+ tfile = fullfile(path,[name '_t.eps']);
+ tfile2 = fullfile(path,[name '_t2.eps']);
+ foundRenderer = 0;
+ for k=1:length(args)
+ if strncmp('-d',args{k},2)
+ args{k} = '-deps';
+ elseif strncmp('-zbuffer',args{k},8) | ...
+ strncmp('-opengl', args{k},6)
+ args{k} = '-painters';
+ foundRenderer = 1;
+ end
+ end
+ if ~foundRenderer
+ args = {args{:}, '-painters'};
+ end
+ allNonText = [allLines; allLights; allPatch; ...
+ allImages; allSurf; allRect];
+ oldvis = LocalGetAsCell(allNonText,'visible');
+ oldc = LocalGetAsCell(allAxes,'color');
+ oldaxg = LocalGetAsCell(allAxes,'XGrid');
+ oldayg = LocalGetAsCell(allAxes,'YGrid');
+ oldazg = LocalGetAsCell(allAxes,'ZGrid');
+ [null{:}] = deal('off');
+ set(allAxes,{'XGrid'},null);
+ set(allAxes,{'YGrid'},null);
+ set(allAxes,{'ZGrid'},null);
+ set(allNonText,'Visible','off');
+ set(allAxes,'Color','none');
+ print(H, tfile2, args{:});
+ set(allNonText,{'Visible'},oldvis);
+ set(allAxes,{'Color'},oldc);
+ set(allAxes,{'XGrid'},oldaxg);
+ set(allAxes,{'YGrid'},oldayg);
+ set(allAxes,{'ZGrid'},oldazg);
+ %hack up the postscript file
+ fid1 = fopen(tfile,'w');
+ fid2 = fopen(tfile2,'r');
+ line = fgetl(fid2);
+ while ischar(line)
+ if strncmp(line,'%%Title',7)
+ fprintf(fid1,'%s\n',['%%Title: ', tfile]);
+ elseif (length(line) < 3)
+ fprintf(fid1,'%s\n',line);
+ elseif ~strcmp(line(end-2:end),' PR') & ...
+ ~strcmp(line(end-1:end),' L')
+ fprintf(fid1,'%s\n',line);
+ end
+ line = fgetl(fid2);
+ end
+ fclose(fid1);
+ fclose(fid2);
+ delete(tfile2);
+
+ elseif ~opts.applystyle
+ drawnow;
+ print(H, filename, args{:});
+ end
+ warning(oldwarn);
+
+catch
+ warning(oldwarn);
+ hadError = 1;
+end
+
+% Restore figure settings
+if opts.applystyle
+ varargout{1} = old;
+else
+ for n=1:length(old.objs)
+ if ~iscell(old.values{n}) & iscell(old.prop{n})
+ old.values{n} = {old.values{n}};
+ end
+ set(old.objs{n}, old.prop{n}, old.values{n});
+ end
+end
+
+if hadError
+ error(deblank(lasterr));
+end
+
+%
+% Local Functions
+%
+
+function outData = LocalPushOldData(inData, objs, prop, values)
+outData.objs = {objs, inData.objs{:}};
+outData.prop = {prop, inData.prop{:}};
+outData.values = {values, inData.values{:}};
+
+function cellArray = LocalGetAsCell(fig,prop,allowemptycell);
+cellArray = get(fig,prop);
+if nargin < 3
+ allowemptycell = 0;
+end
+if ~iscell(cellArray) & (allowemptycell | ~isempty(cellArray))
+ cellArray = {cellArray};
+end
+
+function newArray = LocalScale(inArray, scale, minv, maxv)
+n = length(inArray);
+newArray = cell(n,1);
+for k=1:n
+ newArray{k} = min(maxv,max(minv,scale*inArray{k}(1)));
+end
+
+function gray = LocalMapToGray1(color)
+gray = color;
+if ischar(color)
+ switch color(1)
+ case 'y'
+ color = [1 1 0];
+ case 'm'
+ color = [1 0 1];
+ case 'c'
+ color = [0 1 1];
+ case 'r'
+ color = [1 0 0];
+ case 'g'
+ color = [0 1 0];
+ case 'b'
+ color = [0 0 1];
+ case 'w'
+ color = [1 1 1];
+ case 'k'
+ color = [0 0 0];
+ end
+end
+if ~ischar(color)
+ gray = 0.30*color(1) + 0.59*color(2) + 0.11*color(3);
+end
+
+function newArray = LocalMapToGray(inArray);
+n = length(inArray);
+newArray = cell(n,1);
+for k=1:n
+ color = inArray{k};
+ if ~isempty(color)
+ color = LocalMapToGray1(color);
+ end
+ if isempty(color) | ischar(color)
+ newArray{k} = color;
+ else
+ newArray{k} = [color color color];
+ end
+end
+
+function newArray = LocalMapColorToStyle(inArray);
+inArray = LocalGetAsCell(inArray,'Color');
+n = length(inArray);
+newArray = cell(n,1);
+styles = {'-','--',':','-.'};
+uniques = [];
+nstyles = length(styles);
+for k=1:n
+ gray = LocalMapToGray1(inArray{k});
+ if isempty(gray) | ischar(gray) | gray < .05
+ newArray{k} = '-';
+ else
+ if ~isempty(uniques) & any(gray == uniques)
+ ind = find(gray==uniques);
+ else
+ uniques = [uniques gray];
+ ind = length(uniques);
+ end
+ newArray{k} = styles{mod(ind-1,nstyles)+1};
+ end
+end
+
+function newArray = LocalMapCData(inArray);
+n = length(inArray);
+newArray = cell(n,1);
+for k=1:n
+ color = inArray{k};
+ if (ndims(color) == 3) & isa(color,'double')
+ gray = 0.30*color(:,:,1) + 0.59*color(:,:,2) + 0.11*color(:,:,3);
+ color(:,:,1) = gray;
+ color(:,:,2) = gray;
+ color(:,:,3) = gray;
+ end
+ newArray{k} = color;
+end
+
+function outData = LocalUpdateColors(inArray, prop, inData)
+value = LocalGetAsCell(inArray,prop);
+outData.objs = {inData.objs{:}, inArray};
+outData.prop = {inData.prop{:}, {prop}};
+outData.values = {inData.values{:}, value};
+if (~isempty(value))
+ if strcmp(prop,'CData')
+ value = LocalMapCData(value);
+ else
+ value = LocalMapToGray(value);
+ end
+ set(inArray,{prop},value);
+end
+
+function bool = LocalIsPositiveScalar(value)
+bool = isnumeric(value) & ...
+ prod(size(value)) == 1 & ...
+ value > 0;
+
+function value = LocalToNum(value,auto)
+if ischar(value)
+ if strcmp(value,'auto')
+ value = auto;
+ else
+ value = str2num(value);
+ end
+end
+
+%convert a struct to {field1,val1,field2,val2,...}
+function c = LocalToCell(s)
+f = fieldnames(s);
+v = struct2cell(s);
+opts = cell(2,length(f));
+opts(1,:) = f;
+opts(2,:) = v;
+c = {opts{:}};
+
+function c = LocalIsHG(obj,hgtype)
+c = 0;
+if (length(obj) == 1) & ishandle(obj)
+ c = strcmp(get(obj,'type'),hgtype);
+end
+
+function c = LocalHas3DPlot(a)
+zticks = LocalGetAsCell(a,'ZTickLabel');
+c = 0;
+for k=1:length(zticks)
+ if ~isempty(zticks{k})
+ c = 1;
+ return;
+ end
+end
+
+function r = LocalUnionRect(r1,r2)
+if isempty(r1)
+ r = r2;
+elseif isempty(r2)
+ r = r1;
+elseif max(r2(3:4)) > 0
+ left = min(r1(1),r2(1));
+ bot = min(r1(2),r2(2));
+ right = max(r1(1)+r1(3),r2(1)+r2(3));
+ top = max(r1(2)+r1(4),r2(2)+r2(4));
+ r = [left bot right-left top-bot];
+else
+ r = r1;
+end
+
+function c = LocalLabelsMatchTicks(labs,ticks)
+c = 0;
+try
+ t1 = num2str(ticks(1));
+ n = length(ticks);
+ tend = num2str(ticks(n));
+ c = strncmp(labs(1),t1,length(labs(1))) & ...
+ strncmp(labs(n),tend,length(labs(n)));
+end
+
+function r = LocalAxesTightBoundingBox(axesR, a)
+r = [];
+atext = findall(a,'type','text','visible','on');
+if ~isempty(atext)
+ set(atext,'units','points');
+ res=LocalGetAsCell(atext,'extent');
+ for n=1:length(atext)
+ r = LocalUnionRect(r,res{n});
+ end
+end
+if strcmp(get(a,'visible'),'on')
+ r = LocalUnionRect(r,[0 0 axesR(3:4)]);
+ oldunits = get(a,'fontunits');
+ set(a,'fontunits','points');
+ label = text(0,0,'','parent',a,...
+ 'units','points',...
+ 'fontsize',get(a,'fontsize'),...
+ 'fontname',get(a,'fontname'),...
+ 'fontweight',get(a,'fontweight'),...
+ 'fontangle',get(a,'fontangle'),...
+ 'visible','off');
+ fs = get(a,'fontsize');
+
+ % handle y axis tick labels
+ ry = [0 -fs/2 0 axesR(4)+fs];
+ ylabs = get(a,'yticklabels');
+ yticks = get(a,'ytick');
+ maxw = 0;
+ if ~isempty(ylabs)
+ for n=1:size(ylabs,1)
+ set(label,'string',ylabs(n,:));
+ ext = get(label,'extent');
+ maxw = max(maxw,ext(3));
+ end
+ if ~LocalLabelsMatchTicks(ylabs,yticks) & ...
+ strcmp(get(a,'xaxislocation'),'bottom')
+ ry(4) = ry(4) + 1.5*ext(4);
+ end
+ if strcmp(get(a,'yaxislocation'),'left')
+ ry(1) = -(maxw+5);
+ else
+ ry(1) = axesR(3);
+ end
+ ry(3) = maxw+5;
+ r = LocalUnionRect(r,ry);
+ end
+
+ % handle x axis tick labels
+ rx = [0 0 0 fs+5];
+ xlabs = get(a,'xticklabels');
+ xticks = get(a,'xtick');
+ if ~isempty(xlabs)
+ if strcmp(get(a,'xaxislocation'),'bottom')
+ rx(2) = -(fs+5);
+ if ~LocalLabelsMatchTicks(xlabs,xticks);
+ rx(4) = rx(4) + 2*fs;
+ rx(2) = rx(2) - 2*fs;
+ end
+ else
+ rx(2) = axesR(4);
+ % exponent is still below axes
+ if ~LocalLabelsMatchTicks(xlabs,xticks);
+ rx(4) = rx(4) + axesR(4) + 2*fs;
+ rx(2) = -2*fs;
+ end
+ end
+ set(label,'string',xlabs(1,:));
+ ext1 = get(label,'extent');
+ rx(1) = -ext1(3)/2;
+ set(label,'string',xlabs(size(xlabs,1),:));
+ ext2 = get(label,'extent');
+ rx(3) = axesR(3) + (ext2(3) + ext1(3))/2;
+ r = LocalUnionRect(r,rx);
+ end
+ set(a,'fontunits',oldunits);
+ delete(label);
+end
+
+function c = LocalManualAxesMode(old, allAxes, base)
+xs = ['X' base];
+ys = ['Y' base];
+zs = ['Z' base];
+oldXMode = LocalGetAsCell(allAxes,xs);
+oldYMode = LocalGetAsCell(allAxes,ys);
+oldZMode = LocalGetAsCell(allAxes,zs);
+old = LocalPushOldData(old, allAxes, {xs}, oldXMode);
+old = LocalPushOldData(old, allAxes, {ys}, oldYMode);
+old = LocalPushOldData(old, allAxes, {zs}, oldZMode);
+set(allAxes,xs,'manual');
+set(allAxes,ys,'manual');
+set(allAxes,zs,'manual');
+c = old;
+
+function val = LocalCheckAuto(val, auto)
+if ischar(val) & strcmp(val,'auto')
+ val = auto;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/extend_domain_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/extend_domain_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function B = extend_domain_table(A, smalldom, smallsz, bigdom, bigsz)
+% EXTEND_DOMAIN_TABLE Expand an array so it has the desired size.
+% B = extend_domain_table(A, smalldom, smallsz, bigdom, bigsz)
+%
+% A is the array with domain smalldom and sizes smallsz.
+% bigdom is the desired domain, with sizes bigsz.
+%
+% Example:
+% smalldom = [1 3], smallsz = [2 4], bigdom = [1 2 3 4], bigsz = [2 1 4 5],
+% so B(i,j,k,l) = A(i,k) for i in 1:2, j in 1:1, k in 1:4, l in 1:5
+
+% A scalar broadcasts against anything, so no reshaping is needed.
+if isequal(size(A), [1 1]) % a scalar
+ B = A; % * myones(bigsz);
+ return;
+end
+
+% map(i) = position of smalldom(i) inside bigdom.
+map = find_equiv_posns(smalldom, bigdom);
+% Insert singleton dimensions for the variables A does not contain...
+sz = ones(1, length(bigdom));
+sz(map) = smallsz;
+B = myreshape(A, sz); % add dimensions for the stuff not in A
+% ...then tile B along exactly those singleton dimensions.
+sz = bigsz;
+sz(map) = 1; % don't replicate along A's dimensions
+B = myrepmat(B, sz(:)');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/factorial.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/factorial.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function x = factorial(n)
+% FACTORIAL Compute n!
+% x = factorial(n)
+%
+% n must be a non-negative integer scalar; factorial(0) = 1.
+%
+% Computed as an iterative product rather than by recursion, so a large
+% n cannot exhaust the recursion limit, and negative or non-integer
+% input is rejected instead of recursing forever (the old recursive
+% version never terminated for such input).
+
+if n < 0 | n ~= round(n)
+  error('factorial: n must be a non-negative integer');
+end
+x = prod(1:n); % empty product gives 1 when n == 0
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/filepartsLast.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/filepartsLast.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function [last] = filepartsLast(fname)
+% filepartsLast Return the last part of a filename (strip off directory and suffix)
+% function filepartsLast(fname)
+%
+% Examples
+% filepartsLast('C:/foo/bar') = 'bar'
+% filepartsLast('C:/foo/bar.mat') = 'bar'
+% filepartsLast('C:/foo/bar.mat.gz') = 'bar.mat'
+% filepartsLast('bar.mat') = 'bar'
+
+% Request only the outputs we use: the old 4-output form
+% [pathstr,name,ext,versn] = fileparts(...) errors in modern MATLAB,
+% where the 'versn' output has been removed.
+[pathstr, name] = fileparts(fname);
+last = name;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/find_equiv_posns.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/find_equiv_posns.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function p = find_equiv_posns(vsmall, vlarge)
+% FIND_EQUIV_POSNS p[i] = the place where vsmall[i] occurs in vlarge.
+% p = find_equiv_posns(vsmall, vlarge)
+% THE VECTORS ARE ASSUMED TO BE SORTED.
+%
+% e.g., vsmall=[2,8], vlarge=[2,7,8,4], p=[1,3]
+%
+% In R/S, this function is called 'match'
+
+% Nothing can match against an empty vector.
+if isempty(vsmall) | isempty(vlarge)
+  p = [];
+  return;
+end
+
+% Mark the members of vsmall in a sparse indicator vector, then read
+% the indicator back off at the positions named by vlarge: the hits
+% are exactly the positions where vlarge holds an element of vsmall.
+member = sparse(1, max(vlarge));
+member(vsmall) = 1;
+p = find(member(vlarge));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/fullfileKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/fullfileKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function f = fullfileKPM(varargin)
+% fullfileKPM Concatenate strings with file separator, then convert it to a/b/c
+% function f = fullfileKPM(varargin)
+
+% Build the path with the platform's separator, then normalise any
+% backslashes (as produced on Windows) into forward slashes.
+f = strrep(fullfile(varargin{:}), '\', '/');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/genpathKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/genpathKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function p = genpathKPM(d)
+% genpathKPM Like built-in genpath, but omits directories whose name is 'Old', 'old' or 'CVS'
+% function p = genpathKPM(d)
+
+% With no argument, behave like genpath: scan the MATLAB toolbox tree.
+if nargin==0,
+  p = genpath(fullfile(matlabroot,'toolbox'));
+  if length(p) > 1, p(end) = []; end % Remove trailing pathsep
+  return
+end
+
+methodsep = '@'; % qualifier for overloaded method directories
+p = ''; % path to be returned
+
+% A nonexistent or unreadable directory contributes nothing.
+files = dir(d);
+if isempty(files)
+  return
+end
+
+% Add d itself to the path, even if it has no subdirectories.
+p = [p d pathsep];
+
+% Keep only the subdirectory entries of d.
+% (Use a name other than 'isdir' so the builtin is not shadowed.)
+isdirFlag = logical(cat(1,files.isdir));
+subdirs = files(isdirFlag);
+
+% Recurse into every subdirectory except '.', '..', 'private' and
+% method ('@...') directories, plus the KPM-specific exclusions
+% 'old', 'Old' and 'CVS'.
+skipNames = {'.', '..', 'private', 'old', 'Old', 'CVS'};
+for k=1:length(subdirs)
+  nm = subdirs(k).name;
+  if ~any(strcmp(nm, skipNames)) & ~strncmp(nm, methodsep, 1)
+    p = [p genpathKPM(fullfile(d, nm))]; % recursive call
+  end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/hash_add.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/hash_add.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function hash_add(key, val, fname)
+% HASH_ADD Append key,value pair to end of hashtable stored in a file
+% function hash_add(key, val, filename)
+%
+% See hash_lookup for an example
+
+if exist(fname, 'file')
+  % File exists: pull in the saved 'hashtable' struct and append.
+  load(fname, '-mat'); % defines hashtable
+  n = length(hashtable.key);
+  hashtable.key{n+1} = key;
+  hashtable.value{n+1} = val;
+else
+  % No file yet: start a fresh single-entry table.
+  hashtable.key = {key};
+  hashtable.value = {val};
+end
+save(fname, 'hashtable', '-mat');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/hash_del.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/hash_del.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function ndx = hash_del(key, fname)
+% HASH_DEL Remove all entries that match key from hashtable stored in a file
+% ndx = hash_del(key, fname)
+%
+% Returns indices of matching entries (if any)
+% See hash_lookup for an example
+
+ndx = [];
+
+% A missing file means an empty table: nothing to delete.
+if exist(fname, 'file')
+  load(fname, '-mat'); % defines hashtable
+  % Collect the indices of every entry whose key matches.
+  for k=1:length(hashtable.key)
+    if isequal(hashtable.key{k}, key)
+      ndx = [ndx k];
+    end
+  end
+  % Drop the matching entries and write the table back.
+  hashtable.key(ndx) = [];
+  hashtable.value(ndx) = [];
+  save(fname, 'hashtable', '-mat');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/hash_lookup.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/hash_lookup.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function [val, found, Nentries] = hash_lookup(key, fname)
+% HASH_LOOKUP Lookup a key in a hash table stored in a file using linear search
+% function [val, found, Nentries] = hash_lookup(key, filename)
+%
+% Example:
+% If htbl.mat does not exist,
+% [val,found,N] = hash_lookup('foo', 'htbl')
+% returns found val = [], found = 0, N = 0
+% hash_add('foo', 42, 'htbl')
+% hash_add('bar', [1:10], 'htbl')
+% [val,found,N] = hash_lookup('foo', 'htbl')
+% now returns val = 42, found = 1, N = 2
+%
+% Type 'delete htbl' to delete the file/ reset the hashtable
+
+
+val = [];
+found = 0;
+
+if exist(fname, 'file')==0
+  % new hashtable
+  Nentries = 0;
+else
+  % Load with '-mat', consistent with hash_add/hash_del: the table is
+  % saved in MAT format but fname need not carry a .mat extension, so a
+  % plain load could misinterpret the file.
+  load(fname, '-mat'); % defines hashtable
+  Nentries = length(hashtable.key);
+  % Linear scan; stop at the first matching key.
+  for i=1:Nentries
+    if isequal(hashtable.key{i}, key)
+      val = hashtable.value{i};
+      found = 1;
+      break;
+    end
+  end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/hsvKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/hsvKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function colors = hsvKPM(N)
+% hsvKPM Like built-in HSV, except it randomizes the order, so that adjacent colors are dis-similar
+% function colors = hsvKPM(N)
+
+% Draw the standard hsv colormap, then shuffle its rows in one step.
+colors = hsv(N);
+colors = colors(randperm(N), :);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/hungarian.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/hungarian.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,464 @@
+function [C,T]=hungarian(A)
+%HUNGARIAN Solve the Assignment problem using the Hungarian method.
+%
+%[C,T]=hungarian(A)
+%A - a square cost matrix.
+%C - the optimal assignment.
+%T - the cost of the optimal assignment.
+
+% Adapted from the FORTRAN IV code in Carpaneto and Toth, "Algorithm 548:
+% Solution of the assignment problem [H]", ACM Transactions on
+% Mathematical Software, 6(1):104-111, 1980.
+
+% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se.
+% Department of Computing Science, Umeå University,
+% Sweden.
+% All standard disclaimers apply.
+
+% A substantial effort was put into this code. If you use it for a
+% publication or otherwise, please include an acknowledgement or at least
+% notify me by email. /Niclas
+
+% Data representation used throughout (shared with the subfunctions):
+% - A is extended with an (n+1)-th column; the zeros of each row are
+%   chained as a linked list of negated column indices, with the list
+%   head stored in A(i,n+1) (built by hminired).
+% - C(j)=i means row i is assigned to column j (0 = unassigned).
+% - U is a linked list of unassigned rows, head in U(n+1).
+% - LR/LC label rows/columns on the current augmenting path; RH holds a
+%   linked list of unexplored rows, CH each row's next free-zero column.
+
+[m,n]=size(A);
+
+if (m~=n)
+ error('HUNGARIAN: Cost matrix must be square!');
+end
+
+% Save original cost matrix.
+orig=A;
+
+% Reduce matrix.
+A=hminired(A);
+
+% Do an initial assignment.
+[A,C,U]=hminiass(A);
+
+% Repeat while we have unassigned rows.
+while (U(n+1))
+ % Start with no path, no unchecked zeros, and no unexplored rows.
+ LR=zeros(1,n);
+ LC=zeros(1,n);
+ CH=zeros(1,n);
+ RH=[zeros(1,n) -1];
+
+ % No labelled columns.
+ SLC=[];
+
+ % Start path in first unassigned row.
+ r=U(n+1);
+ % Mark row with end-of-path label.
+ LR(r)=-1;
+ % Insert row first in labelled row set.
+ SLR=r;
+
+ % Repeat until we manage to find an assignable zero.
+ while (1)
+ % If there are free zeros in row r
+ if (A(r,n+1)~=0)
+ % ...get column of first free zero.
+ l=-A(r,n+1);
+
+ % If there are more free zeros in row r and row r in not
+ % yet marked as unexplored..
+ if (A(r,l)~=0 & RH(r)==0)
+ % Insert row r first in unexplored list.
+ RH(r)=RH(n+1);
+ RH(n+1)=r;
+
+ % Mark in which column the next unexplored zero in this row
+ % is.
+ CH(r)=-A(r,l);
+ end
+ else
+ % If all rows are explored..
+ if (RH(n+1)<=0)
+ % Reduce matrix.
+ [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR);
+ end
+
+ % Re-start with first unexplored row.
+ r=RH(n+1);
+ % Get column of next free zero in row r.
+ l=CH(r);
+ % Advance "column of next free zero".
+ CH(r)=-A(r,l);
+ % If this zero is last in the list..
+ if (A(r,l)==0)
+ % ...remove row r from unexplored list.
+ RH(n+1)=RH(r);
+ RH(r)=0;
+ end
+ end
+
+ % While the column l is labelled, i.e. in path.
+ while (LC(l)~=0)
+ % If row r is explored..
+ if (RH(r)==0)
+ % If all rows are explored..
+ if (RH(n+1)<=0)
+ % Reduce cost matrix.
+ [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR);
+ end
+
+ % Re-start with first unexplored row.
+ r=RH(n+1);
+ end
+
+ % Get column of next free zero in row r.
+ l=CH(r);
+
+ % Advance "column of next free zero".
+ CH(r)=-A(r,l);
+
+ % If this zero is last in list..
+ if(A(r,l)==0)
+ % ...remove row r from unexplored list.
+ RH(n+1)=RH(r);
+ RH(r)=0;
+ end
+ end
+
+ % If the column found is unassigned..
+ if (C(l)==0)
+ % Flip all zeros along the path in LR,LC.
+ [A,C,U]=hmflip(A,C,LC,LR,U,l,r);
+ % ...and exit to continue with next unassigned row.
+ break;
+ else
+ % ...else add zero to path.
+
+ % Label column l with row r.
+ LC(l)=r;
+
+ % Add l to the set of labelled columns.
+ SLC=[SLC l];
+
+ % Continue with the row assigned to column l.
+ r=C(l);
+
+ % Label row r with column l.
+ LR(r)=l;
+
+ % Add r to the set of labelled rows.
+ SLR=[SLR r];
+ end
+ end
+end
+
+% Calculate the total cost.
+% The sparse matrix marks the assigned (row,column) cells of orig.
+T=sum(orig(logical(sparse(C,1:size(orig,2),1))));
+
+
+function A=hminired(A)
+%HMINIRED Initial reduction of cost matrix for the Hungarian method.
+%
+%B=assredin(A)
+%A - the unreduced cost matris.
+%B - the reduced cost matrix with linked zeros in each row.
+%
+% After reduction, each row's zeros are chained into a linked list of
+% negated column indices terminated by 0, with the list head stored in
+% the extra column B(k,n+1).
+
+% v1.0 96-06-13. Niclas Borlin, niclas@cs.umu.se.
+
+[m,n]=size(A);
+
+% Subtract column-minimum values from each column.
+colMin=min(A);
+A=A-colMin(ones(n,1),:);
+
+% Subtract row-minimum values from each row.
+rowMin=min(A')';
+A=A-rowMin(:,ones(1,n));
+
+% Get positions of all zeros.
+[i,j]=find(A==0);
+
+% Extend A to give room for row zero list header column.
+A(1,n+1)=0;
+for k=1:n
+ % Get all column in this row.
+ cols=j(k==i)';
+ % Insert pointers in matrix.
+ % A(k,n+1) gets -first zero column; each zero cell points to the next.
+ A(k,[n+1 cols])=[-cols 0];
+end
+
+
+function [A,C,U]=hminiass(A)
+%HMINIASS Initial assignment of the Hungarian method.
+%
+%[B,C,U]=hminiass(A)
+%A - the reduced cost matrix.
+%B - the reduced cost matrix, with assigned zeros removed from lists.
+%C - a vector. C(J)=I means row I is assigned to column J,
+% i.e. there is an assigned zero in position I,J.
+%U - a vector with a linked list of unassigned rows.
+
+% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se.
+
+[n,np1]=size(A);
+
+% Initalize return vectors.
+C=zeros(1,n);
+U=zeros(1,n+1);
+
+% Initialize last/next zero "pointers".
+% LZ(r) = list position before row r's first unassigned zero;
+% NZ(r) = column of row r's next unassigned zero.
+LZ=zeros(1,n);
+NZ=zeros(1,n);
+
+for i=1:n
+ % Set j to first unassigned zero in row i.
+ lj=n+1;
+ j=-A(i,lj);
+
+ % Repeat until we have no more zeros (j==0) or we find a zero
+ % in an unassigned column (c(j)==0).
+
+ while (C(j)~=0)
+ % Advance lj and j in zero list.
+ lj=j;
+ j=-A(i,lj);
+
+ % Stop if we hit end of list.
+ if (j==0)
+ break;
+ end
+ end
+
+ if (j~=0)
+ % We found a zero in an unassigned column.
+
+ % Assign row i to column j.
+ C(j)=i;
+
+ % Remove A(i,j) from unassigned zero list.
+ A(i,lj)=A(i,j);
+
+ % Update next/last unassigned zero pointers.
+ NZ(i)=-A(i,j);
+ LZ(i)=lj;
+
+ % Indicate A(i,j) is an assigned zero.
+ A(i,j)=0;
+ else
+ % We found no zero in an unassigned column.
+
+ % Check all zeros in this row.
+
+ lj=n+1;
+ j=-A(i,lj);
+
+ % Check all zeros in this row for a suitable zero in another row.
+ while (j~=0)
+ % Check the in the row assigned to this column.
+ r=C(j);
+
+ % Pick up last/next pointers.
+ lm=LZ(r);
+ m=NZ(r);
+
+ % Check all unchecked zeros in free list of this row.
+ while (m~=0)
+ % Stop if we find an unassigned column.
+ if (C(m)==0)
+ break;
+ end
+
+ % Advance one step in list.
+ lm=m;
+ m=-A(r,lm);
+ end
+
+ if (m==0)
+ % We failed on row r. Continue with next zero on row i.
+ lj=j;
+ j=-A(i,lj);
+ else
+ % We found a zero in an unassigned column.
+ % Reassign row r from column j to column m, freeing column j
+ % so that row i can take it.
+
+ % Replace zero at (r,m) in unassigned list with zero at (r,j)
+ A(r,lm)=-j;
+ A(r,j)=A(r,m);
+
+ % Update last/next pointers in row r.
+ NZ(r)=-A(r,m);
+ LZ(r)=j;
+
+ % Mark A(r,m) as an assigned zero in the matrix . . .
+ A(r,m)=0;
+
+ % ...and in the assignment vector.
+ C(m)=r;
+
+ % Remove A(i,j) from unassigned list.
+ A(i,lj)=A(i,j);
+
+ % Update last/next pointers in row r.
+ NZ(i)=-A(i,j);
+ LZ(i)=lj;
+
+ % Mark A(r,m) as an assigned zero in the matrix . . .
+ A(i,j)=0;
+
+ % ...and in the assignment vector.
+ C(j)=i;
+
+ % Stop search.
+ break;
+ end
+ end
+ end
+end
+
+% Create vector with list of unassigned rows.
+
+% Mark all rows have assignment.
+r=zeros(1,n);
+rows=C(C~=0);
+r(rows)=rows;
+empty=find(r==0);
+
+% Create vector with linked list of unassigned rows.
+% U(n+1) is the list head; each entry points to the next unassigned row.
+U=zeros(1,n+1);
+U([n+1 empty])=[empty 0];
+
+
+function [A,C,U]=hmflip(A,C,LC,LR,U,l,r)
+%HMFLIP Flip assignment state of all zeros along a path.
+%
+%[A,C,U]=hmflip(A,C,LC,LR,U,l,r)
+%Input:
+%A - the cost matrix.
+%C - the assignment vector.
+%LC - the column label vector.
+%LR - the row label vector.
+%U - the linked list of unassigned rows (head in U(n+1)).
+%r,l - position of last zero in path.
+%Output:
+%A - updated cost matrix.
+%C - updated assignment vector.
+%U - updated unassigned row list vector.
+
+% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se.
+
+n=size(A,1);
+
+while (1)
+ % Move assignment in column l to row r.
+ C(l)=r;
+
+ % Find zero to be removed from zero list..
+
+ % Find zero before this.
+ m=find(A(r,:)==-l);
+
+ % Link past this zero.
+ A(r,m)=A(r,l);
+
+ A(r,l)=0;
+
+ % If this was the first zero of the path..
+ if (LR(r)<0)
+ % ...remove row from unassigned row list and return.
+ % (This line was previously a bare '...' continuation instead of a
+ % comment; it only worked by gluing onto the assignment below.)
+ U(n+1)=U(r);
+ U(r)=0;
+ return;
+ else
+
+ % Move back in this row along the path and get column of next zero.
+ l=LR(r);
+
+ % Insert zero at (r,l) first in zero list.
+ A(r,l)=A(r,n+1);
+ A(r,n+1)=-l;
+
+ % Continue back along the column to get row of next zero in path.
+ r=LC(l);
+ end
+end
+
+
+function [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR)
+%HMREDUCE Reduce parts of cost matrix in the Hungerian method.
+%
+%[A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR)
+%Input:
+%A - Cost matrix.
+%CH - vector of column of 'next zeros' in each row.
+%RH - vector with list of unexplored rows.
+%LC - column labels.
+%RC - row labels.
+%SLC - set of column labels.
+%SLR - set of row labels.
+%
+%Output:
+%A - Reduced cost matrix.
+%CH - Updated vector of 'next zeros' in each row.
+%RH - Updated vector of unexplored rows.
+
+% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se.
+
+n=size(A,1);
+
+% Find which rows are covered, i.e. unlabelled.
+coveredRows=LR==0;
+
+% Find which columns are covered, i.e. labelled.
+coveredCols=LC~=0;
+
+r=find(~coveredRows);
+c=find(~coveredCols);
+
+% Get minimum of uncovered elements.
+m=min(min(A(r,c)));
+
+% Subtract minimum from all uncovered elements.
+A(r,c)=A(r,c)-m;
+
+% Check all uncovered columns..
+for j=c
+ % ...and uncovered rows in path order..
+ for i=SLR
+ % If this is a (new) zero..
+ if (A(i,j)==0)
+ % If the row is not in unexplored list..
+ if (RH(i)==0)
+ % ...insert it first in unexplored list.
+ RH(i)=RH(n+1);
+ RH(n+1)=i;
+ % Mark this zero as "next free" in this row.
+ CH(i)=j;
+ end
+ % Find last unassigned zero on row I.
+ row=A(i,:);
+ colsInList=-row(row<0);
+ if (length(colsInList)==0)
+ % No zeros in the list.
+ l=n+1;
+ else
+ % The terminating zero of the list marks the last entry.
+ l=colsInList(row(colsInList)==0);
+ end
+ % Append this zero to end of list.
+ A(i,l)=-j;
+ end
+ end
+end
+
+% Add minimum to all doubly covered elements.
+r=find(coveredRows);
+c=find(coveredCols);
+
+% Take care of the zeros we will remove.
+% (These cells cease to be zeros once m is added back, so they must be
+% unlinked from their rows' zero lists first.)
+[i,j]=find(A(r,c)<=0);
+
+i=r(i);
+j=c(j);
+
+for k=1:length(i)
+ % Find zero before this in this row.
+ lj=find(A(i(k),:)==-j(k));
+ % Link past it.
+ A(i(k),lj)=A(i(k),j(k));
+ % Mark it as assigned.
+ A(i(k),j(k))=0;
+end
+
+A(r,c)=A(r,c)+m;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/image_rgb.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/image_rgb.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function image_rgb(M)
+% Show a matrix of integers as a color image.
+% This is like imagesc, except we know what the mapping is from integer to color.
+% If entries of M contain integers in {1,2,3}, we map
+% this to red/green/blue
+%
+% Entries of M index directly into the fixed 4-row colormap below, so
+% values up to 4 (aquamarine) are supported. Draws into the current
+% figure and adds a legend labelled with the integer values.
+
+cmap = [1 0 0; % red
+ 0 1 0; % green
+ 0 0 1; % blue
+ 127/255 1 212/255]; % aquamarine
+image(M)
+set(gcf,'colormap', cmap);
+
+if 1
+ % make dummy handles, one per object type, for the legend
+ str = {};
+ for i=1:size(cmap,1)
+ dummy_handle(i) = line([0 0.1], [0 0.1]);
+ set(dummy_handle(i), 'color', cmap(i,:));
+ set(dummy_handle(i), 'linewidth', 2);
+ str{i} = num2str(i);
+ end
+ % NOTE(review): the numeric position argument -1 (legend outside the
+ % axes) is deprecated in newer MATLAB releases — confirm before reuse.
+ legend(dummy_handle, str, -1);
+end
+
+if 0
+% Disabled alternative: build an explicit RGB image instead of an
+% indexed one (only handles values 1..3, one per color channel).
+[nrows ncols] = size(M);
+img = zeros(nrows, ncols, 3);
+for r=1:nrows
+ for c=1:ncols
+ q = M(r,c);
+ img(r,c,q) = 1;
+ end
+end
+image(img)
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/imresizeAspect.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/imresizeAspect.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function img = imresizeAspect(img, maxSize)
+% function img = imresizeAspect(img, maxSize)
+% If image is larger than max size, reduce size, preserving aspect ratio of input.
+%
+% If size(input) = [y x] and maxSize = [yy xx],
+% then size(output) is given by the following (where a=y/x)
+% if y
+
+void rbinary(int num, int n, double *rbits){
+ int i, mask;
+ /* Decode the 1-based index 'num' into n bits of (num-1), writing
+ * rbits[k] = 2 if bit k is set and 1 if clear (bit 0 = least
+ * significant, stored at rbits[0]). */
+ num = num - 1;
+
+ mask = 1 << (n-1); /* mask = 00100...0 , where the 1 is in column n (rightmost = col 1) */
+ for (i = 0; i < n; i++) {
+ /* Shifting num left walks its bits past the fixed mask, MSB first. */
+ rbits[n-i-1] = ((num & mask) == 0) ? 1 : 2;
+ num <<= 1;
+ }
+}
+
+void ind_subv(int num, const double *sizes, int n, double *rbits){
+ int i;
+ int *cumprod;
+
+ /* Convert the 1-based linear index 'num' into 1-based subscripts for
+ * an n-dimensional array with dimension sizes 'sizes' (column-major,
+ * as in MATLAB's ind2sub); results go into rbits[0..n-1]. */
+ cumprod = malloc(n * sizeof(int)); /* NOTE(review): malloc unchecked */
+ num = num - 1;
+ /* cumprod[i] = product of sizes[0..i-1] = stride of dimension i. */
+ cumprod[0] = 1;
+ for (i = 0; i < n-1; i++)
+ cumprod[i+1] = cumprod[i] * (int)sizes[i];
+ /* Peel off subscripts from the largest stride down. */
+ for (i = n-1; i >= 0; i--) {
+ rbits[i] = ((int)floor(num / cumprod[i])) + 1;
+ num = num % cumprod[i];
+ }
+ free(cumprod);
+}
+
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
+ int i, j, k, nCol, nRow, nnRow, binary, count, temp, temp1, start;
+ double *pSize, *pNdx, *pr;
+ double ndx;
+ int *subv, *cumprod, *templai;
+
+ pSize = mxGetPr(prhs[0]);
+ pNdx = mxGetPr(prhs[1]);
+ nCol = mxGetNumberOfElements(prhs[0]);
+ nnRow = mxGetNumberOfElements(prhs[1]);
+
+ nRow = 1;
+ for(i=0; i 2.0){
+ binary = 0;
+ break;
+ }
+ else if((int)pSize[i] == 1){
+ binary = 1;
+ }
+ }
+
+ if(nnRow == 1){
+ ndx = mxGetScalar(prhs[1]);
+ plhs[0] = mxCreateDoubleMatrix(1, nCol, mxREAL);
+ pr = mxGetPr(plhs[0]);
+ if(binary == 2)rbinary((int)ndx, nCol, pr);
+ else ind_subv((int)ndx, pSize, nCol, pr);
+ return;
+ }
+
+ plhs[0] = mxCreateDoubleMatrix(nnRow, nCol, mxREAL);
+ pr = mxGetPr(plhs[0]);
+
+ subv = malloc(nRow * nCol * sizeof(int));
+
+ if (binary == 2) {
+ for(j=0; j> k;
+ }
+ for(j=0; j 2) temp = 1;
+ for(k=0; k (int)pSize[j]) temp = 1;
+ for(k=0; k= 0 for any vector v.
+% We do this by checking that all the eigenvalues are non-negative.
+
+E = eig(M);
+if length(find(E>=0)) == length(E)
+ b = 1;
+else
+ b = 0;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/is_stochastic.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/is_stochastic.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function p = is_stochastic(T)
+% IS_STOCHASTIC Is the argument a stochastic matrix, i.e., the sum over the last dimension is 1.
+% p = is_stochastic(T)
+
+% T is stochastic iff renormalising it (mk_stochastic) leaves it
+% approximately unchanged.
+p = approxeq(T, mk_stochastic(T));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/isemptycell.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/isemptycell.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function E = isemptycell(C)
+% ISEMPTYCELL Apply the isempty function to each element of a cell array
+% E = isemptycell(C)
+%
+% Returns a logical array the same size as C.
+
+% The previous version kept an explicit loop behind a disabled 'if 0'
+% branch; cellfun('isempty', ...) is the equivalent built-in (available
+% since MATLAB 5.3, per the original comment) and replaces both.
+E = cellfun('isempty', C);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/isequalKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/isequalKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function p = isequalKPM(a,b)
+% isequalKPM Like isequal, except any two empty objects compare equal
+% (isequal([], {}) is false, but isequalKPM([], {}) returns 1).
+
+bothEmpty = isempty(a) & isempty(b);
+if bothEmpty
+  p = 1;
+else
+  p = isequal(a,b);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/isposdef.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/isposdef.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function b = isposdef(a)
+% ISPOSDEF Test for positive definite matrix.
+% ISPOSDEF(A) returns 1 if A is positive definite, 0 otherwise.
+% Using chol is much more efficient than computing eigenvectors.
+
+% From Tom Minka's lightspeed toolbox
+
+% chol reports failure through its second output: info is 0 exactly
+% when the Cholesky factorization succeeds, i.e. A is positive definite.
+[R, info] = chol(a);
+b = (info == 0);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/isscalarBNT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/isscalarBNT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function p = isscalarBNT(v)
+% ISSCALAR Returns 1 if all dimensions have size 1.
+% p = isscalar(v)
+
+% A scalar has exactly one element, whatever its number of dimensions.
+p = (numel(v) == 1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/isvectorBNT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/isvectorBNT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function p = isvectorBNT(v)
+% ISVECTOR Returns 1 if all but one dimension have size 1.
+% p = isvector(v)
+%
+% Example: isvector(rand(1,2,1)) = 1, isvector(rand(2,2)) = 0.
+
+% A vector is at most 2-d with a singleton first or second dimension.
+sz = size(v);
+p = (ndims(v) <= 2) & (sz(1) == 1 | sz(2) == 1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/junk.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/junk.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+
+ m = mxGetM(prhs[0]);
+ n = mxGetN(prhs[0]);
+ pr = mxGetPr(prhs[0]);
+ pi = mxGetPi(prhs[0]);
+ cmplx = (pi == NULL ? 0 : 1);
+
+ /* Allocate space for sparse matrix.
+ * NOTE: Assume at most 20% of the data is sparse. Use ceil
+ * to cause it to round up.
+ */
+
+ percent_sparse = 0.2;
+ nzmax = (int)ceil((double)m*(double)n*percent_sparse);
+
+ plhs[0] = mxCreateSparse(m,n,nzmax,cmplx);
+ sr = mxGetPr(plhs[0]);
+ si = mxGetPi(plhs[0]);
+ irs = mxGetIr(plhs[0]);
+ jcs = mxGetJc(plhs[0]);
+
+ /* Copy nonzeros. */
+ k = 0;
+ isfull = 0;
+ for (j = 0; (j < n); j++) {
+ int i;
+ jcs[j] = k;
+ for (i = 0; (i < m); i++) {
+ if (IsNonZero(pr[i]) || (cmplx && IsNonZero(pi[i]))) {
+
+ /* Check to see if non-zero element will fit in
+ * allocated output array. If not, increase
+ * percent_sparse by 10%, recalculate nzmax, and augment
+ * the sparse array.
+ */
+ if (k >= nzmax) {
+ int oldnzmax = nzmax;
+ percent_sparse += 0.1;
+ nzmax = (int)ceil((double)m*(double)n*percent_sparse);
+
+ /* Make sure nzmax increases atleast by 1. */
+ if (oldnzmax == nzmax)
+ nzmax++;
+
+ mxSetNzmax(plhs[0], nzmax);
+ mxSetPr(plhs[0], mxRealloc(sr, nzmax*sizeof(double)));
+ if (si != NULL)
+ mxSetPi(plhs[0], mxRealloc(si, nzmax*sizeof(double)));
+ mxSetIr(plhs[0], mxRealloc(irs, nzmax*sizeof(int)));
+
+ sr = mxGetPr(plhs[0]);
+ si = mxGetPi(plhs[0]);
+ irs = mxGetIr(plhs[0]);
+ }
+ sr[k] = pr[i];
+ if (cmplx) {
+ si[k] = pi[i];
+ }
+ irs[k] = i;
+ k++;
+ }
+ }
+ pr += m;
+ pi += m;
+ }
+ jcs[n] = k;
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/loadcell.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/loadcell.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,153 @@
+function [lc,dflag,dattype]=loadcell(fname,delim,exclusions,options);
+%function [lc,dflag,numdata]=loadcell(fname,delim,exclusions);
+%
+% loadcell loads a cell array with character delimited
+% data, which can have variable length lines and content.
+% Numeric values are converted from string to double
+% unless options is a string containing 'string'.
+%
+% loadcell is for use with small datasets. It is not optimised
+% for large datasets.
+%
+% fname is the filename to be loaded
+%
+% delim is/are the relevant delimiter(s). If char(10) is included
+% newlines are simply treated as delimiters and a 1-d array is created.
+%
+% exclusions are the set of characters to be treated as paired
+% braces: line ends or delimiters within braces are ignored.
+% braces are single characters and any brace can pair with
+% any other brace: no type pair checking is done.
+%
+% options can be omitted or can contain 'string' if no numeric
+% conversion is required, 'single' if multiple adjacent seperators
+% should not be treated as one, 'free' if all linefeeds should be stripped
+% first and 'empty2num' if empty fields are to be treated as numeric
+% zeros rather than an empty character set. Combine options using
+% concatenation.
+%
+% lc is a cell array containing the loaded data.
+%
+% dflag is a set of flags denoting the (i,j) values where data was entered
+% dflag(i,j)=1 implies lc(i,j) was loaded from the data, and not just set
+% to empty, say, by default.
+%
+% numdata is an array numdata(i,j)=NaN implies
+% lc(i,j) is a string, otherwise it stores the number at i,j.
+% This will occur regardless of whether the 'string' option is set.
+%
+% lc will return -1 if the file is not found or could not be
+% opened.
+%
+% Hint: numdata+(1/dflag-1) provides a concise descriptor for the numeric data
+% Inf=not loaded
+% NaN=was string or empty set.
+% otherwise numeric
+%
+% EXAMPLE
+%
+%[a,b]=loadcell('resultsfile',[',' char(9)],'"','single-string');
+% will load file 'resultsfile' into variable a, treating any of tab or
+% comma as delimiters. Delimiters or carriage returns lying
+% between two double inverted commas will be ignored. Two adjacent delimiters
+% will count twice, and all data will be kept as a string.
+%
+% Note: in space-separated data 'single' would generally be omitted,
+% wheras in comma-seperated data it would be included.
+%
+% Note the exclusion characters will remain in the final data, and any data
+% contained within or containing exclusion characters will not be
+% converted to numerics.
+%
+% (c) Amos Storkey 2002
+% v b160702
+
+% MATLAB is incapable of loading variable length lines or variable type values
+% with a whole file command under the standard library sets. This mfile
+% fills that gap.
+if (nargin<4)
+ options=' ';
+end;
+dflag = [];
+%Open file
+fid=fopen(fname,'rt');
+%Cannot open: return -1
+if (fid<0)
+ lc=-1;
+else
+ fullfile=fread(fid,'uchar=>char')';
+ %Strip LF if free is set
+ if ~isempty(findstr(options,'free'))
+ fullfile=strrep(fullfile,char(10),'');
+ end;
+ %Find all delimiters
+ delimpos=[];
+ for s=1:length(delim)
+ delimpos=[delimpos find(fullfile==delim(s))];
+ end
+ %Find all eol
+ endpos=find(fullfile==char(10));
+ endpos=setdiff(endpos,delimpos);
+ %find all exclusions
+ xclpos=[];
+ for s=1:length(exclusions);
+ xclpos=[xclpos find(fullfile==exclusions(s))];
+ end
+ sort(xclpos);
+ xclpos=[xclpos(1:2:end-1);xclpos(2:2:end)];
+ %Combine eol and delimiters
+ jointpos=union(delimpos,endpos);
+ t=1;
+ %Remove delim/eol within exclusion pairs
+ removedelim=[];
+ for s=1:length(jointpos)
+ if any((jointpos(s)>xclpos(1,:)) & (jointpos(s)a2
+% If a1 ~ a2, and a1>a2, then e^(a2-a1) is exp(small negative number),
+% which can be computed without underflow.
+
+% Same as logsumexp, except we assume a is a vector.
+% This avoids a call to repmat, which takes 50% of the time!
+
+a = a(:)'; % make row vector
+m = max(a);
+b = a - m*ones(1,length(a));
+s = m + log(sum(exp(b)));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mahal2conf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mahal2conf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+% MAHAL2CONF - Translates a Mahalanobis distance into a confidence
+% interval. Consider a multivariate Gaussian
+% distribution of the form
+%
+% p(x) = 1/sqrt((2 * pi)^d * det(C)) * exp((-1/2) * MD(x, m, inv(C)))
+%
+% where MD(x, m, P) is the Mahalanobis distance from x
+% to m under P:
+%
+% MD(x, m, P) = (x - m) * P * (x - m)'
+%
+% A particular Mahalanobis distance k identifies an
+% ellipsoid centered at the mean of the distribution.
+% The confidence interval associated with this ellipsoid
+% is the probability mass enclosed by it.
+%
+% If X is an d dimensional Gaussian-distributed vector,
+% then the Mahalanobis distance of X is distributed
+% according to the Chi-squared distribution with d
+% degrees of freedom. Thus, the confidence interval is
+% determined by integrating the chi squared distribution
+% up to the Mahalanobis distance of the measurement.
+%
+% Usage:
+%
+% c = mahal2conf(m, d);
+%
+% Inputs:
+%
+% m - the Mahalanobis radius of the ellipsoid
+% d - the number of dimensions of the Gaussian distribution
+%
+% Outputs:
+%
+% c - the confidence interval, i.e., the fraction of
+% probability mass enclosed by the ellipsoid with the
+% supplied Mahalanobis distance
+%
+% See also: CONF2MAHAL
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function c = mahal2conf(m, d)
+% The Mahalanobis distance of a d-dimensional Gaussian variate follows a
+% chi-squared distribution with d degrees of freedom (see the header
+% comments above), so the enclosed probability mass is the chi-squared
+% CDF evaluated at the supplied distance m.
+% NOTE(review): chi2cdf comes from the Statistics Toolbox.
+c = chi2cdf(m, d);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/marg_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/marg_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function smallT = marg_table(bigT, bigdom, bigsz, onto, maximize)
+% MARG_TABLE Marginalize a table
+% smallT = marg_table(bigT, bigdom, bigsz, onto, maximize)
+%
+% bigT     - multi-dimensional table over the variables in bigdom
+% bigdom   - variable ids labelling the dimensions of bigT
+% bigsz    - bigsz(i) = number of states of variable bigdom(i)
+% onto     - the variables to keep (marginalize_table.m notes this version
+%            assumes the domains are sorted consistently)
+% maximize - if 1, max out the remaining variables instead of summing
+%            (default 0)
+
+if nargin < 5, maximize = 0; end
+
+
+smallT = myreshape(bigT, bigsz); % make sure it is a multi-dim array
+sum_over = mysetdiff(bigdom, onto);
+ndx = find_equiv_posns(sum_over, bigdom);
+% Collapse each marginalized dimension in turn (they become singletons).
+if maximize
+ for i=1:length(ndx)
+ smallT = max(smallT, [], ndx(i));
+ end
+else
+ for i=1:length(ndx)
+ smallT = sum(smallT, ndx(i));
+ end
+end
+
+
+% Map each variable id to its size so the kept dimensions can be rebuilt.
+ns = zeros(1, max(bigdom));
+%ns(bigdom) = mysize(bigT); % ignores trailing dimensions of size 1
+ns(bigdom) = bigsz;
+
+smallT = squeeze(smallT); % remove all dimensions of size 1
+smallT = myreshape(smallT, ns(onto)); % put back relevant dims of size 1
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/marginalize_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/marginalize_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+function smallT = marginalize_table(bigT, bigdom, bigsz, onto, maximize)
+% MARGINALIZE_TABLE Marginalize a table onto a subset of its domain.
+% function smallT = marginalize_table(bigT, bigdom, bigsz, onto, maximize)
+%
+% Like marg_table in BNT, except we do not assume the domains are sorted.
+%
+% bigT     - multi-dimensional table over the variables in bigdom
+% bigdom   - variable ids labelling the dimensions of bigT
+% bigsz    - bigsz(i) = number of states of variable bigdom(i)
+% onto     - variables to keep, in the desired output dimension order
+% maximize - if 1, max out the other variables instead of summing (default 0)
+
+if nargin < 5, maximize = 0; end
+
+
+smallT = myreshape(bigT, bigsz); % make sure it is a multi-dim array
+sum_over = mysetdiff(bigdom, onto);
+ndx = find_equiv_posns(sum_over, bigdom);
+if maximize
+ for i=1:length(ndx)
+ smallT = max(smallT, [], ndx(i));
+ end
+else
+ for i=1:length(ndx)
+ smallT = sum(smallT, ndx(i));
+ end
+end
+
+
+% The collapsed dimensions are kept as singletons, so permute the result
+% so its dimensions appear in the order requested by onto (onto may order
+% variables differently from bigdom).
+
+% like find_equiv_posns, but keeps ordering.
+% BUG FIX 1: this loop previously appeared twice verbatim; the duplicate
+% has been removed.
+outdom = [onto sum_over];
+match = zeros(1, length(outdom)); % preallocate
+for i=1:length(outdom)
+ match(i) = find(bigdom==outdom(i));
+end
+% BUG FIX 2: the old test 'if match ~= 1' used a vector condition, which
+% is true only when EVERY entry differs from 1; since match is a
+% permutation of 1:n it always contains 1, so the permute never ran.
+% Compare against the identity permutation instead.
+if ~isequal(match, 1:length(match))
+ smallT = permute(smallT, match);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/matprint.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/matprint.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+% MATPRINT - prints a matrix with specified format string
+%
+% Usage: matprint(a, fmt, fid)
+%
+% a - Matrix to be printed.
+% fmt - C style format string to use for each value.
+% fid - Optional file id.
+%
+% Eg. matprint(a,'%3.1f') will print each entry to 1 decimal place
+
+% Peter Kovesi
+% School of Computer Science & Software Engineering
+% The University of Western Australia
+% pk @ csse uwa edu au
+% http://www.csse.uwa.edu.au/~pk
+%
+% March 2002
+
+function matprint(a, fmt, fid)
+% Print matrix a with one C-style format spec (fmt) per entry, one matrix
+% row per output line, to file id fid (defaults to stdout).
+
+  % Default to printing on the screen.
+  if nargin < 3
+    fid = 1;
+  end
+
+  cols = size(a, 2);
+
+  % One output line = 'cols' copies of fmt, space-separated, plus newline.
+  fmtstr = [repmat([' ' fmt], 1, cols), '\n'];
+
+  % fprintf consumes its data argument down the columns, so hand it the
+  % transpose to emit the matrix row by row.
+  fprintf(fid, fmtstr, a');
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/max_mult.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/max_mult.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+/* C mex version of max_mult.m in BPMRF2 directory */
+/* gcc -Wall -I/mit/matlab_v6.5/distrib/bin/glnx86 -c max_mult.c */
+
+/* y(i,j) = max_k A(i,k) * x(k,j) -- matrix multiply with max replacing sum */
+
+#include <math.h>
+#include "mex.h"
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+ int rows,cols,common,m,n,p;
+ double y1, y2;
+ double *arr1, *arr2, *arr3;
+
+
+ if (nrhs!=2 || nlhs>1)
+ mexErrMsgTxt("max_mult requires two inputs and one output");
+ if (mxIsChar(prhs[0]) || mxIsClass(prhs[0], "sparse") || mxIsComplex(prhs[0])
+ || mxIsChar(prhs[1]) || mxIsClass(prhs[1], "sparse") || mxIsComplex(prhs[1]))
+ mexErrMsgTxt("Inputs must be real, full, and nonstring");
+ if (mxGetN(prhs[0])!=mxGetM(prhs[1]))
+ mexErrMsgTxt("The number of columns of A must be the same as the number of rows of x");
+
+
+ arr1=mxGetPr(prhs[0]);
+ arr2=mxGetPr(prhs[1]);
+ p=mxGetN(prhs[0]); /* shared (inner) dimension */
+ m=mxGetM(prhs[0]);
+ n=mxGetN(prhs[1]);
+ plhs[0]=mxCreateDoubleMatrix(m, n, mxREAL);
+ arr3=mxMalloc(m*n*sizeof(double));
+
+ /* NOTE(review): the original loop body was garbled in this changeset
+    dump ("for (rows=0; rowsy1)"); reconstructed from the max_mult.m
+    specification. Arrays are column-major: A(i,k)=arr1[i+k*m],
+    x(k,j)=arr2[k+j*p]. */
+ for (rows=0; rows < m; rows++) {
+ for (cols=0; cols < n; cols++) {
+ y1 = arr1[rows]*arr2[cols*p]; /* k = 0 term */
+ for (common=1; common < p; common++) {
+ y2 = arr1[rows+common*m]*arr2[common+cols*p];
+ if (y2>y1)
+ y1=y2;
+ }
+ arr3[rows+cols*m]=y1;
+ }
+ }
+
+ /* hand the freshly filled buffer to the output (the buffer created by
+    mxCreateDoubleMatrix is replaced, as in the original) */
+ mxSetPr(plhs[0], arr3);
+
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/max_mult.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/max_mult.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function y=max_mult(A,x)
+% MAX_MULT Like matrix multiplication, but sum gets replaced by max
+% function y=max_mult(A,x) y(i) = max_j A(i,j) x(j)
+
+%X=ones(size(A,1),1) * x(:)'; % X(j,i) = x(i)
+%y=max(A.*X, [], 2);
+
+% This is faster
+if size(x,2)==1
+ X=x*ones(1,size(A,1)); % X(i,j) = x(i)
+ y=max(A'.*X)';
+else
+%this works for arbitrarily sized A and x (but is ugly, and slower than above)
+ X=repmat(x, [1 1 size(A,1)]);
+ B=repmat(A, [1 1 size(x,2)]);
+ C=permute(B,[2 3 1]);
+ y=permute(max(C.*X),[3 2 1]);
+% this is even slower, as is using squeeze instead of permute
+% Y=permute(X, [3 1 2]);
+% y=permute(max(Y.*B, [], 2), [1 3 2]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mexutil.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mexutil.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+#include "mexutil.h"
+
+/* Functions to create uninitialized arrays. */
+
+mxArray *mxCreateNumericArrayE(int ndim, const int *dims,
+ mxClassID class, mxComplexity ComplexFlag)
+{
+ mxArray *a;
+ int i, *dims1 = mxMalloc(ndim*sizeof(int));
+ size_t sz = 1;
+ for(i=0;i 0
+ h = hh;
+end
+
+%--------------------------------------------------------------
+%Parse Inputs Function
+
+function [I,map] = parse_inputs(varargin)
+% Validate the image (and optional colormap) arguments for the enclosing
+% montage-style function. Returns the image I and the colormap map
+% ([] when the caller supplied none). Errors on int16 indexed images and
+% on the obsolete MONTAGE(D,[M N P]) calling syntax.
+
+% initialize variables
+map = [];
+
+% iptchecknargin / iptcheckinput are Image Processing Toolbox validators.
+iptchecknargin(1,2,nargin,mfilename);
+iptcheckinput(varargin{1},{'uint8' 'double' 'uint16' 'logical' 'single' ...
+ 'int16'},{},mfilename, 'I, BW, or RGB',1);
+I = varargin{1};
+
+if nargin==2
+ % int16 images cannot be indexed, so a supplied colormap is an error.
+ if isa(I,'int16')
+ eid = sprintf('Images:%s:invalidIndexedImage',mfilename);
+ msg1 = 'An indexed image can be uint8, uint16, double, single, or ';
+ msg2 = 'logical.';
+ error(eid,'%s %s',msg1, msg2);
+ end
+ map = varargin{2};
+ iptcheckinput(map,{'double'},{},mfilename,'MAP',1);
+ % A 1x3 "map" whose product equals numel(I) is the old size-vector form.
+ if ((size(map,1) == 1) && (prod(map) == numel(I)))
+ % MONTAGE(D,[M N P]) OBSOLETE
+ eid = sprintf('Images:%s:obsoleteSyntax',mfilename);
+ msg1 = 'MONTAGE(D,[M N P]) is an obsolete syntax.';
+ msg2 = 'Use multidimensional arrays to represent multiframe images.';
+ error(eid,'%s\n%s',msg1,msg2);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/montageKPM2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/montageKPM2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,69 @@
+function montageKPM2(data)
+% data(y,x,b,f) or data(y,x,f)
+% can be double - uses imagesc to display, not imshow
+% based on imaqmontage
+
+% Normalise input to 4D (row, col, band, frame); 3D means single-band.
+if ndims(data)==3
+ nr = size(data,1); nc = size(data,2); Npatches = size(data,3); Nbands = 1;
+ data = reshape(data, [nr nc Nbands Npatches]);
+else
+ nr = size(data,1); nc = size(data,2); Nbands = size(data,3); Npatches = size(data,4);
+end
+nativeVal = data(1, 1);
+dataOrig = data;
+
+%put a black border around them for display purposes
+border = 5;
+bgColor = min(data(:));
+%bgColor = max(data(:));
+data = bgColor*ones(nr+2*border, nc+2*border, Nbands, Npatches, class(data));
+data(border+1:end-border, border+1:end-border, :, :) = dataOrig;
+
+[width, height, bands, nFrames] = size(data);
+
+% Determine the number of axis rows and columns.
+% Aim for a roughly square grid, rounding whichever direction wastes
+% fewer cells.
+axCols = sqrt(nFrames);
+if (axCols<1)
+ % In case we have a slim image.
+ axCols = 1;
+end
+axRows = nFrames/axCols;
+if (ceil(axCols)-axCols) < (ceil(axRows)-axRows),
+ axCols = ceil(axCols);
+ axRows = ceil(nFrames/axCols);
+else
+ axRows = ceil(axRows);
+ axCols = ceil(nFrames/axRows);
+end
+
+% Size the storage to hold all frames.
+storage = repmat(nativeVal, [axRows*width, axCols*height, bands, 1]);
+
+% Fill the storage up with data pixels.
+% Frame k goes to grid cell (i,j) in row-major order.
+rows = 1:width;
+cols = 1:height;
+for i=0:axRows-1,
+ for j=0:axCols-1,
+ k = j+i*axCols+1;
+ if k<=nFrames,
+ storage(rows+i*width, cols+j*height, :) = data(:,:,:,k);
+ else
+ break;
+ end
+ end
+end
+
+
+% Display the tiled frames nicely and
+% pop the window forward.
+im = imagesc(storage);
+
+ax = get(im, 'Parent');
+fig = get(ax, 'Parent');
+set(ax, 'XTick', [], 'YTick', [])
+figure(fig)
+
+% If working with single band images, update the colormap.
+if 0 % bands==1,
+ colormap(gray);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/montageKPM3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/montageKPM3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function montageKPM3(data)
+% data{f}(y,x,b) - each frame can have a different size (can even be empty)
+
+% Pad all frames to a common size (cell2matPad is a KPMtools helper),
+% then reuse the array-based montage.
+data2 = cell2matPad(data);
+montageKPM2(data2)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mult_by_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mult_by_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function bigT = mult_by_table(bigT, bigdom, bigsz, smallT, smalldom, smallsz)
+% MULT_BY_TABLE
+% bigT = mult_by_table(bigT, bigdom, bigsz, smallT, smalldom, smallsz)
+%
+% Multiply bigT (over bigdom) elementwise by smallT after replicating it
+% up to bigT's domain. smalldom is presumably a subset of bigdom -- see
+% extend_domain_table (KPMtools helper).
+
+Ts = extend_domain_table(smallT, smalldom, smallsz, bigdom, bigsz);
+bigT(:) = bigT(:) .* Ts(:); % must have bigT(:) on LHS to preserve shape
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myintersect.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myintersect.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function C = myintersect(A,B)
+% MYINTERSECT Intersection of two sets of positive integers (much faster than built-in intersect)
+% C = myintersect(A,B)
+%
+% Returns the elements of B (in B's order, duplicates included) that also
+% occur in A. Inputs are vectors of positive integers.
+
+A = A(:)'; B = B(:)';
+
+if isempty(A)
+ ma = 0;
+else
+ ma = max(A);
+end
+
+if isempty(B)
+ mb = 0;
+else
+ mb = max(B);
+end
+
+if ma==0 || mb==0 % short-circuit scalar OR (was element-wise |)
+ C = [];
+else
+ % Mark A's members in a bit vector indexed by value, then keep the
+ % entries of B whose bit is set.
+ %bits = sparse(1, max(ma,mb));
+ bits = zeros(1, max(ma,mb));
+ bits(A) = 1;
+ C = B(logical(bits(B)));
+end
+
+%sum( bitget( bitand( cliquesb(i), cliquesb(j) ), 1:52 ) );
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myismember.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myismember.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function p = myismember(a,A)
+% MYISMEMBER Is 'a' an element of a set of positive integers? (much faster than built-in ismember)
+% p = myismember(a,A)
+%
+% Returns a logical scalar.
+
+% With short-circuit ||, min/max are only evaluated when A is non-empty,
+% so the combined test is as cheap as the original elseif cascade (which
+% avoided the non-short-circuit | for speed).
+if isempty(A) || a < min(A) || a > max(A)
+ p = false;
+else
+ % Bit vector indexed by value: bits(v)==1 iff v is in A.
+ bits = zeros(1, max(A));
+ bits(A) = 1;
+ p = logical(bits(a));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myones.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myones.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function T = myones(sizes)
+% MYONES Like the built-in ones, except myones(k) produces a k*1 vector instead of a k*k matrix,
+% T = myones(sizes)
+%
+% myones([]) returns the scalar 1.
+
+if isempty(sizes) % idiomatic form of length(sizes)==0
+ T = 1;
+elseif length(sizes)==1
+ T = ones(sizes, 1);
+else
+ T = ones(sizes(:)'); % force a row so ones() sees a size vector
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myplot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myplot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+% Build plot style strings ('-ro', '-bx', ...) by pairing a colour with a
+% marker for each series; leaves colors, symbols and styles in the workspace.
+colors = ['r' 'b' 'k' 'g' 'c' 'y' 'm' ...
+	  'r' 'b' 'k' 'g' 'c' 'y' 'm'];
+symbols = ['o' 'x' 's' '>' '<' '^' 'v' ...
+	   '*' 'p' 'h' '+' 'd' 'o' 'x'];
+% Stack dash/colour/marker as the columns of a char matrix, then convert
+% each row to a cell entry (1 x N cell, same as the original loop built).
+styles = cellstr([repmat('-', length(colors), 1) colors' symbols'])';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myrand.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myrand.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function T = myrand(sizes)
+% MYRAND Like the built-in rand, except myrand(k) produces a k*1 vector instead of a k*k matrix,
+% T = myrand(sizes)
+%
+% myrand([]) warns and returns a single random scalar.
+
+if isempty(sizes) % idiomatic form of length(sizes)==0
+ warning('myrand[]');
+ T = rand(1,1);
+elseif length(sizes)==1
+ T = rand(sizes, 1);
+else
+ T = rand(sizes);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myrepmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myrepmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function T = myrepmat(T, sizes)
+% MYREPMAT Like the built-in repmat, except myrepmat(T,n) == repmat(T,[n 1])
+% T = myrepmat(T, sizes)
+
+% A scalar size means "replicate along the first dimension only";
+% otherwise hand repmat the size vector, forced into a row.
+if length(sizes)==1
+ reps = [sizes 1];
+else
+ reps = sizes(:)';
+end
+T = repmat(T, reps);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myreshape.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myreshape.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function T = myreshape(T, sizes)
+% MYRESHAPE Like the built-in reshape, except myreshape(T,n) == reshape(T,[n 1])
+% T = myreshape(T, sizes)
+
+% An empty size vector leaves T untouched.
+if ~isempty(sizes)
+ if length(sizes)==1
+ T = reshape(T, [sizes 1]); % scalar size -> column vector
+ else
+ T = reshape(T, sizes(:)'); % force a row so reshape sees a size vector
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mysetdiff.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysetdiff.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function C = mysetdiff(A,B)
+% MYSETDIFF Set difference of two sets of positive integers (much faster than built-in setdiff)
+% C = mysetdiff(A,B)
+% C = A \ B = { things in A that are not in B }
+%
+% Original by Kevin Murphy, modified by Leon Peshkin
+
+% Trivial cases: nothing to keep, or nothing to remove.
+if isempty(A)
+ C = [];
+elseif isempty(B)
+ C = A;
+else % both non-empty
+ % Mark A's members in a bit vector indexed by value, clear B's, and keep
+ % the entries of A whose bit survived (preserves A's order/duplicates).
+ bits = zeros(1, max(max(A), max(B)));
+ bits(A) = 1;
+ bits(B) = 0;
+ C = A(logical(bits(A)));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mysize.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysize.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function sz = mysize(M)
+% MYSIZE Like the built-in size, except it returns n if M is a vector of length n, and 1 if M is a scalar.
+% sz = mysize(M)
+%
+% The behavior is best explained by examples
+% - M = rand(1,1), mysize(M) = 1, size(M) = [1 1]
+% - M = rand(2,1), mysize(M) = 2, size(M) = [2 1]
+% - M = rand(1,2), mysize(M) = 2, size(M) = [1 2]
+% - M = rand(2,2,1), mysize(M) = [2 2], size(M) = [2 2]
+% - M = rand(1,2,1), mysize(M) = 2, size(M) = [1 2]
+
+% isvectorBNT is presumably a toolbox-local helper (not the built-in
+% isvector); it decides whether M counts as a vector.
+if isvectorBNT(M)
+ sz = length(M);
+else
+ sz = size(M);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mysubset.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysubset.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function p=mysubset(small,large)
+% MYSUBSET Is the small set of +ve integers a subset of the large set?
+% p = mysubset(small, large)
+
+% Surprisingly, this is not built-in.
+
+% The empty set is a subset of everything.
+if isempty(small)
+ p = 1; % isempty(large);
+else
+ % small is a subset of large iff intersecting with large loses nothing.
+ p = length(myintersect(small,large)) == length(small);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/mysymsetdiff.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysymsetdiff.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function C = mysymsetdiff(A,B)
+% MYSYMSETDIFF Symmetric set difference of two sets of positive integers (much faster than built-in setdiff)
+% C = mysymsetdiff(A,B)
+% C = (A\B) union (B\A) = { things that A and B don't have in common }
+
+% If either set is empty the answer is the other set, returned unchanged.
+if isempty(A)
+ C = B;
+elseif isempty(B)
+ C = A;
+else % both non-empty
+ % Mark each set in its own (sparse) bit vector indexed by value, then
+ % keep the values that appear in exactly one of the two.
+ m = max(max(A), max(B));
+ bitsA = sparse(1, m);
+ bitsA(A) = 1;
+ bitsB = sparse(1, m);
+ bitsB(B) = 1;
+ C = find(xor(bitsA, bitsB));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/myunion.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/myunion.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function C = myunion(A,B)
+% MYUNION Union of two sets of positive integers (much faster than built-in union)
+% C = myunion(A,B)
+%
+% When both inputs are non-empty the result is a sorted row vector of the
+% distinct values; an empty input returns the other argument unchanged.
+
+if isempty(A) && isempty(B) % short-circuit scalar AND (was element-wise &)
+ C = [];
+elseif isempty(A)
+ C = B;
+elseif isempty(B)
+ C = A;
+else
+ % Mark both sets in one bit vector indexed by value; find() yields the
+ % distinct members in ascending order.
+ %bits = sparse(1, max(ma,mb));
+ bits = zeros(1, max(max(A), max(B)));
+ bits(A) = 1;
+ bits(B) = 1;
+ C = find(bits);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/nchoose2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/nchoose2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function c = nchoose2(v, f)
+%NCHOOSE2 All combinations of N elements taken two at a time.
+%
+% NCHOOSE2(1:N) or NCHOOSEK(V) where V is a vector of length N,
+% produces a matrix with N*(N-1)/2 rows and K columns. Each row of
+% the result has K of the elements in the vector V.
+%
+% NCHOOSE2(N,FLAG) is the same as NCHOOSE2(1:N) but faster.
+%
+% NCHOOSE2(V) is much faster than NCHOOSEK(V,2).
+%
+% See also NCHOOSEK, PERMS.
+
+% Author: Peter J. Acklam
+% Time-stamp: 2000-03-03 13:03:59
+% E-mail: jacklam@math.uio.no
+% URL: http://www.math.uio.no/~jacklam
+
+ nargs = nargin;
+ if nargs < 1
+ error('Not enough input arguments.');
+ elseif nargs == 1
+ % One argument: treat it as the vector V of values to pair up.
+ v = v(:);
+ n = length(v);
+ elseif nargs == 2
+ % Two arguments: the first is N itself (the flag is only a marker).
+ n = v;
+ else
+ error('Too many input arguments.');
+ end
+
+ % The strictly lower triangle contains exactly one nonzero per pair
+ % (i>j); find() returns its (row,col) indices in column-major order.
+ [ c(:,2), c(:,1) ] = find( tril( ones(n), -1 ) );
+
+ if nargs == 1
+ % Map index pairs back to the caller's values.
+ c = v(c);
+ end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/ncols.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/ncols.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function c = ncols(x)
+% NCOLS The number of columns.
+% NCOLS is a more readable alternative to size(x,2).
+% (Renamed internally from 'cols' to match the file name ncols.m; MATLAB
+% dispatches on the file name, so callers are unaffected.)
+c = size(x,2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/nonmaxsup.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/nonmaxsup.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+% NONMAXSUP - Non-maximal Suppression
+%
+% Usage: cim = nonmaxsup(im, radius)
+%
+% Arguments:
+% im - image to be processed.
+% radius - radius of region considered in non-maximal
+% suppression (optional). Typical values to use might
+% be 1-3. Default is 1.
+%
+% Returns:
+% cim - image with pixels that are not maximal within a
+% square neighborhood zeroed out.
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function cim = nonmaxsup(m, radius)
+% See the header comment above. Requires ORDFILT2 (Image Processing Toolbox).
+ if (nargin == 1) radius = 1; end % default neighbourhood radius
+ % Extract local maxima by performing a grey scale morphological
+ % dilation and then finding points in the corner strength image that
+ % match the dilated image and are also greater than the threshold.
+ sze = 2 * radius + 1; % Size of mask.
+ mx = ordfilt2(m, sze^2, ones(sze)); % Grey-scale dilate.
+ % Keep only pixels equal to their neighbourhood maximum; the result is
+ % returned sparse since most entries are zeroed.
+ cim = sparse(m .* (m == mx));
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/normalise.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/normalise.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function [M, z] = normalise(A, dim)
+% NORMALISE Make the entries of a (multidimensional) array sum to 1
+% [M, c] = normalise(A)
+% c is the normalizing constant
+%
+% [M, c] = normalise(A, dim)
+% If dim is specified, we normalise the specified dimension only,
+% otherwise we normalise the whole array.
+
+if nargin < 2
+ z = sum(A(:));
+ % Set any zeros to one before dividing
+ % This is valid, since c=0 => all i. A(i)=0 => the answer should be 0/1=0
+ s = z + (z==0);
+ M = A / s;
+elseif dim==1 % normalize each column
+ z = sum(A);
+ s = z + (z==0); % guard all-zero columns against 0/0
+ %M = A ./ (d'*ones(1,size(A,1)))';
+ M = A ./ repmatC(s, size(A,1), 1); % repmatC is a KPMtools MEX helper
+else
+ % Keith Battocchi - v. slow because of repmat
+ z=sum(A,dim);
+ s = z + (z==0);
+ L=size(A,dim);
+ d=length(size(A));
+ % Replicate the sums L times along dim only, to match A's shape.
+ v=ones(d,1);
+ v(dim)=L;
+ %c=repmat(s,v);
+ c=repmat(s,v');
+ M=A./c;
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+/* C mex version of normalise.m in misc directory */
+/* [M, z] = normaliseC(T): M = T scaled so its stored entries sum to 1
+   (returned unchanged when the sum is not positive); z = the original
+   sum. For sparse inputs only the stored nonzeros are visited. */
+
+#include "mex.h"
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+ double *T, *sum_ptr, sum;
+ int i, N;
+
+ /* work on a copy so the input is left untouched */
+ plhs[0] = mxDuplicateArray(prhs[0]);
+ T = mxGetPr(plhs[0]);
+ /* for sparse arrays, Jc[ncols] is the number of stored nonzeros */
+ if(mxIsSparse(plhs[0])) N = mxGetJc(plhs[0])[mxGetN(plhs[0])];
+ else N = mxGetNumberOfElements(plhs[0]);
+
+ plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL);
+ sum_ptr = mxGetPr(plhs[1]);
+
+ /* accumulate the total, then rescale in place (only if positive) */
+ sum = 0;
+ for (i = 0; i < N; i++) {
+ sum += *T++;
+ }
+ T = mxGetPr(plhs[0]);
+ if (sum > 0) {
+ for (i = 0; i < N; i++) {
+ *T++ /= sum;
+ }
+ }
+ *sum_ptr = sum;
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.dll
Binary file toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.dll has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/normalize.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/normalize.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function [M, z] = normalise(A, dim)
+% NORMALISE Make the entries of a (multidimensional) array sum to 1
+% [M, c] = normalise(A)
+% c is the normalizing constant
+%
+% [M, c] = normalise(A, dim)
+% If dim is specified, we normalise the specified dimension only,
+% otherwise we normalise the whole array.
+%
+% NOTE(review): this file (normalize.m) is a byte-for-byte copy of
+% normalise.m with a mismatched internal name; MATLAB dispatches on the
+% file name, so it is callable as normalize(...).
+
+if nargin < 2
+ z = sum(A(:));
+ % Set any zeros to one before dividing
+ % This is valid, since c=0 => all i. A(i)=0 => the answer should be 0/1=0
+ s = z + (z==0);
+ M = A / s;
+elseif dim==1 % normalize each column
+ z = sum(A);
+ s = z + (z==0); % guard all-zero columns against 0/0
+ %M = A ./ (d'*ones(1,size(A,1)))';
+ M = A ./ repmatC(s, size(A,1), 1); % repmatC is a KPMtools MEX helper
+else
+ % Keith Battocchi - v. slow because of repmat
+ z=sum(A,dim);
+ s = z + (z==0);
+ L=size(A,dim);
+ d=length(size(A));
+ % Replicate the sums L times along dim only, to match A's shape.
+ v=ones(d,1);
+ v(dim)=L;
+ %c=repmat(s,v);
+ c=repmat(s,v');
+ M=A./c;
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/nrows.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/nrows.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function r = nrows(x)
+% NROWS The number of rows.
+% NROWS is a more readable alternative to size(x,1).
+% (Renamed internally from 'rows' to match the file name nrows.m; MATLAB
+% dispatches on the file name, so callers are unaffected.)
+r = size(x,1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/num2strcell.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/num2strcell.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function c = num2strcell(n, format)
+% num2strcell Convert vector of numbers to cell array of strings
+% function c = num2strcell(n, format)
+%
+% If format is omitted, we use
+% c{i} = sprintf('%d', n(i))
+
+if nargin < 2, format = '%d'; end
+
+% Format each element independently; flatten n to a row so the result is
+% always a 1xN cell, whatever the orientation of n.
+c = arrayfun(@(v) sprintf(format, v), reshape(n, 1, []), ...
+             'UniformOutput', false);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/optimalMatching.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/optimalMatching.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,90 @@
+% MATCH - Solves the weighted bipartite matching (or assignment)
+% problem.
+%
+% Usage: a = match(C);
+%
+% Arguments:
+% C - an m x n cost matrix; the sets are taken to be
+% 1:m and 1:n; C(i, j) gives the cost of matching
+% items i (of the first set) and j (of the second set)
+%
+% Returns:
+%
+% a - an m x 1 assignment vector, which gives the
+% minimum cost assignment. a(i) is the index of
+% the item of 1:n that was matched to item i of
+% 1:m. If item i (of 1:m) was not matched to any
+% item of 1:n, then a(i) is zero.
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [a] = optimalMatching(C)
+% See the header comment above: minimum-cost bipartite matching of 1:m to
+% 1:n given cost matrix C; a(i) = matched column for row i, or 0.
+% Relies on hungarian() (toolbox helper).
+
+% Trivial cases:
+[p, q] = size(C);
+if (p == 0)
+ a = [];
+ return;
+elseif (q == 0)
+ a = zeros(p, 1);
+ return;
+end
+
+
+% Disabled pre-reduction step (kept for reference).
+if 0
+% First, reduce the problem by making easy optimal matches. If two
+% elements agree that they are the best match, then match them up.
+[x, a] = min(C, [], 2);
+[y, b] = min(C, [], 1);
+u = find(1:p ~= b(a(:)));
+a(u) = 0;
+v = find(1:q ~= a(b(:))');
+C = C(u, v);
+if (isempty(C)) return; end
+end
+
+% Get the (new) size of the two sets, u and v.
+[m, n] = size(C);
+
+% Padding cost: larger than any real entry, so dummy matches are used
+% only when forced.
+%mx = realmax;
+mx = 2*max(C(:));
+mn = -2*min(C(:));
+% Pad the affinity matrix to be square
+if (m < n)
+ C = [C; mx * ones(n - m, n)];
+elseif (n < m)
+ C = [C, mx * ones(m, m - n)];
+end
+
+% Run the Hungarian method. First replace infinite values by the
+% largest (or smallest) finite values.
+C(find(isinf(C) & (C > 0))) = mx;
+C(find(isinf(C) & (C < 0))) = mn;
+%fprintf('running hungarian\n');
+[b, cost] = hungarian(C');
+
+% Extract only the real assignments
+ap = b(1:m)';
+ap(find(ap > n)) = 0; % matches to padding columns mean "unmatched"
+
+a = ap;
+%% Incorporate this sub-assignment into the complete assignment
+% k = find(ap);
+% a(u(k)) = v(ap(k));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/optimalMatchingTest.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/optimalMatchingTest.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+% Consider matching sources to detections
+% (smoke-test script for optimalMatching; sqdist is a KPMtools helper)
+
+% s1 d2
+% s2 d3
+% d1
+
+% Degenerate 2x1 cost matrix: both rows compete for the single column.
+a = optimalMatching([52;0.01])
+
+% sources(:,i) = [x y] coords
+sources = [0.1 0.7; 0.6 0.4]';
+detections = [0.2 0.2; 0.2 0.8; 0.7 0.1]';
+dst = sqdist(sources, detections)
+
+% a = [2 3] which means s1-d2, s2-d3
+a = optimalMatching(dst)
+
+% a = [0 1 2] which means d1-0, d2-s1, d3-s2
+a = optimalMatching(dst')
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/partitionData.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/partitionData.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function varargout = partitionData(Ndata, varargin)
+% PARTITIONDATA Partition a vector of indices into random sets
+% [a,b,c,...] = partitionData(N, 0.3, 0.2, 0.5, ...)
+%
+% The outputs are disjoint slices of a random permutation of 1:N, with
+% sizes fix(N*fraction); the last bin absorbs any remainder.
+%
+% Examples:
+% [a,b,c]=partitionData(105,0.3,0.2,0.5);
+% gives a = perm(1:31), b = perm(32:52), c = perm(53:105), where perm is
+% a random permutation of 1:105 (fix(105*0.3)=31, fix(105*0.2)=21).
+
+Npartitions = length(varargin);
+perm = randperm(Ndata);
+%perm = 1:Ndata;
+ndx = 1;
+for i=1:Npartitions
+ pc(i) = varargin{i};
+ Nbin(i) = fix(Ndata*pc(i));
+ low(i) = ndx;
+ if i==Npartitions
+ high(i) = Ndata; % last bin gets all the left over
+ else
+ high(i) = low(i)+Nbin(i)-1;
+ end
+ varargout{i} = perm(low(i):high(i));
+ ndx = ndx+Nbin(i);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/partition_matrix_vec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/partition_matrix_vec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function [m1, m2, K11, K12, K21, K22] = partition_matrix_vec(m, K, n1, n2, bs)
+% PARTITION_MATRIX_VEC Partition a vector and matrix into blocks.
+% [m1, m2, K11, K12, K21, K22] = partition_matrix_vec(m, K, blocks1, blocks2, bs)
+%
+% bs(i) = block size of i'th node
+%
+% Example:
+% n1 = [6 8], n2 = [5], bs = [- - - - 2 1 - 2], where - = don't care
+% m = [0.1 0.2 0.3 0.4 0.5], K = some 5*5 matrix,
+% So E[X5] = [0.1 0.2], E[X6] = [0.3], E[X8] = [0.4 0.5]
+% m1 = [0.3 0.4 0.5], m2 = [0.1 0.2];
+
+% myunion / find_equiv_posns / block are toolbox helpers: compute the
+% scalar index ranges covered by the nodes in n1 and n2 respectively.
+dom = myunion(n1, n2);
+n1i = block(find_equiv_posns(n1, dom), bs(dom));
+n2i = block(find_equiv_posns(n2, dom), bs(dom));
+% Slice the mean and the four covariance blocks.
+m1 = m(n1i);
+m2 = m(n2i);
+K11 = K(n1i, n1i);
+K12 = K(n1i, n2i);
+K21 = K(n2i, n1i);
+K22 = K(n2i, n2i);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/pca_kpm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/pca_kpm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function [pc_vec]=pca_kpm(features,N, method);
+% PCA_KPM Compute top N principal components using eigs or svd.
+% [pc_vec]=pca_kpm(features,N)
+% [pc_vec]=pca_kpm(features,N, method)
+%
+% features(:,i) is the i'th example - each COLUMN is an observation
+% pc_vec(:,j) is the j'th basis function onto which you should project the data
+% using pc_vec' * features
+%
+% method: 1 = eigendecomposition of the d*d covariance matrix (eigs);
+%         anything else = svds on the centered data.
+
+[d ncases] = size(features);
+fm=features-repmat(mean(features,2), 1, ncases); % center each variable
+
+% BUG FIX: 'method' is optional in the advertised 2-argument usage but was
+% read unconditionally, causing an undefined-variable error. Default to
+% whichever route solves the smaller problem (cf. the comment below).
+if nargin < 3
+  method = (d <= ncases); % d*d covariance is the smaller problem
+end
+
+if method==1 % d*d < d*ncases
+ fprintf('pca_kpm eigs\n');
+ options.disp = 0;
+ C = cov(fm'); % d x d matrix
+ [pc_vec, evals] = eigs(C, N, 'LM', options);
+else
+ % [U,D,V] = SVD(fm), U(:,i)=evec of fm fm', V(:,i) = evec of fm' fm
+ fprintf('pca_kpm svds\n');
+ [U,D,V] = svds(fm', N);
+ pc_vec = V;
+end
+
+if 0
+X = randn(5,3);
+X = X-repmat(mean(X),5,1);
+C = X'*X;
+C2=cov(X)
+[U,D,V]=svd(X);
+[V2,D2]=eig(X)
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/pca_netlab.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/pca_netlab.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+function [PCcoeff, PCvec] = pca(data, N)
+%PCA Principal Components Analysis
+%
+% Description
+% PCCOEFF = PCA(DATA) computes the eigenvalues of the covariance
+% matrix of the dataset DATA and returns them as PCCOEFF. These
+% coefficients give the variance of DATA along the corresponding
+% principal components.
+%
+% PCCOEFF = PCA(DATA, N) returns the largest N eigenvalues.
+%
+% [PCCOEFF, PCVEC] = PCA(DATA) returns the principal components as well
+% as the coefficients. This is considerably more computationally
+% demanding than just computing the eigenvalues.
+%
+% See also
+% EIGDEC, GTMINIT, PPCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin == 1
+ N = size(data, 2); % default: all components
+end
+
+% Only compute eigenvectors when the caller asked for them.
+if nargout == 1
+ evals_only = logical(1);
+else
+ evals_only = logical(0);
+end
+
+if N ~= round(N) | N < 1 | N > size(data, 2)
+ error('Number of PCs must be integer, >0, < dim');
+end
+
+% Find the sorted eigenvalues of the data covariance matrix
+% (eigdec is a netlab helper).
+if evals_only
+ PCcoeff = eigdec(cov(data), N);
+else
+ [PCcoeff, PCvec] = eigdec(cov(data), N);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/pick.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/pick.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function [i,j] = pick(ndx)
+% PICK Pick an entry at random from a vector
+% function [i,j] = pick(ndx)
+%
+% i = ndx(j) for j ~ U(1:length(ndx))
+
+% Uniform distribution over the positions of ndx (normalize is a KPMtools helper).
+dist = normalize(ones(1,length(ndx)));
+% Draw a single position from that distribution (sample_discrete is a KPMtools helper).
+j = sample_discrete(dist);
+i = ndx(j);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotBox.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotBox.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [h, ht] =plotBox(box, col, str)
+% function h=plotBox(box, col, str)
+%
+% box = [xlow xhigh ylow yhigh]
+% col = color (default - red)
+% str = string printed at center (default '')
+%
+% Returns h, the line handle of the box outline, and ht, the text handle
+% of the center label ([] when no label is drawn).
+
+if nargin < 2, col = 'r'; end
+if nargin < 3, str = ''; end
+
+box = double(box); % fails on single
+
+% Trace the four corners and close the loop back to the first corner.
+h = plot([box(1) box(2) box(2) box(1) box(1)], [ box(3) box(3) box(4) box(4) box(3)]);
+set(h, 'color', col);
+set(h, 'linewidth', 2);
+if ~isempty(str)
+ xc = mean(box(1:2));
+ yc = mean(box(3:4));
+ ht = text(xc, yc, str);
+else
+ ht = [];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotColors.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotColors.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function styles = plotColors()
+% PLOTCOLORS Return a cell array of 14 plot style strings '-<color><marker>',
+% formed by pairing a fixed color list with a fixed marker list, for cycling
+% through distinguishable line styles.
+
+colors = ['r' 'b' 'k' 'g' 'c' 'y' 'm' ...
+ 'r' 'b' 'k' 'g' 'c' 'y' 'm'];
+symbols = ['o' 'x' '+' '>' '<' '^' 'v' ...
+ '*' 'p' 'h' 's' 'd' 'o' 'x'];
+for i=1:length(colors)
+ styles{i} = sprintf('-%s%s', colors(i), symbols(i));
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotROC.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotROC.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function [falseAlarmRate, detectionRate, area, th] = plotROC(confidence, testClass, col, varargin)
+% function [falseAlarmRate, detectionRate, area, th] = plotroc(confidence, testClass, color)
+% Compute an ROC curve via computeROC and optionally draw it in color col.
+% If col is empty or omitted, nothing is plotted; statistics are still returned.
+
+if nargin < 3, col = []; end
+
+% 'scale01' = 1 shows the full detection-rate range; 0 zooms in on the top-left corner.
+[scale01] = process_options(varargin, 'scale01', 1);
+
+% computeROC is a companion KPMtools function that does the actual work.
+[falseAlarmRate detectionRate area th] = computeROC(confidence, testClass);
+
+if ~isempty(col)
+ h=plot(falseAlarmRate, detectionRate, [col '-']);
+ %set(h, 'linewidth', 2);
+ % Small margins so the curve does not sit directly on the axes box.
+ ex = 0.05*max(falseAlarmRate);
+ ey = 0.05;
+ if scale01
+ axis([0-ex max(falseAlarmRate)+ex 0-ey 1+ey])
+ else
+ % zoom in on the top left corner
+ axis([0-ex max(falseAlarmRate)*0.5+ex 0.5-ey 1+ey])
+ end
+ grid on
+ ylabel('detection rate')
+ %xlabel('# false alarms')
+ xlabel('false alarm rate')
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotROCkpm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotROCkpm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,69 @@
+function [falseAlarmRate, detectionRate, area, th] = plotROC(confidence, testClass, col, varargin)
+% You pass the scores and the classes, and the function returns the false
+% alarm rate and the detection rate for different points across the ROC.
+%
+% [faR, dR] = plotROC(score, class)
+%
+% faR (false alarm rate) is uniformly sampled from 0 to 1
+% dR (detection rate) is computed using the scores.
+%
+% class = 0 => target absent
+% class = 1 => target present
+%
+% score is the output of the detector, or any other measure of detection.
+% There is no plot unless you add a third parameter that is the color of
+% the graph. For instance:
+% [faR, dR] = plotROC(score, class, 'r')
+%
+% faR, dR are size 1x1250
+
+if nargin < 3, col = []; end
+[scale01] = process_options(varargin, 'scale01', 1);
+
+% Add a tiny, reproducible jitter to break ties between identical scores;
+% the global random state is saved and restored so callers are unaffected.
+S = rand('state');
+rand('state',0);
+confidence = confidence + rand(size(confidence))*10^(-10);
+rand('state',S)
+
+ndxAbs = find(testClass==0); % absent
+ndxPres = find(testClass==1); % present
+
+% Thresholds: 1250 values sampled evenly from the sorted absent-class scores.
+[th, j] = sort(confidence(ndxAbs));
+th = th(fix(linspace(1, length(th), 1250)));
+
+cAbs = confidence(ndxAbs);
+cPres = confidence(ndxPres);
+for t=1:length(th)
+ % Guard against an empty class so the rates default to 0 instead of NaN.
+ if length(ndxPres) == 0
+ detectionRate(t) = 0;
+ else
+ detectionRate(t) = sum(cPres>=th(t)) / length(ndxPres);
+ end
+ if length(ndxAbs) == 0
+ falseAlarmRate(t) = 0;
+ else
+ falseAlarmRate(t) = sum(cAbs>=th(t)) / length(ndxAbs);
+ end
+
+ %detectionRate(t) = sum(confidence(ndxPres)>=th(t)) / length(ndxPres);
+ %falseAlarmRate(t) = sum(confidence(ndxAbs)>=th(t)) / length(ndxAbs);
+ %detections(t) = sum(confidence(ndxPres)>=th(t));
+ %falseAlarms(t) = sum(confidence(ndxAbs)>=th(t));
+end
+
+% Area under the curve by a rectangle rule over the false-alarm axis.
+area = sum(abs(falseAlarmRate(2:end) - falseAlarmRate(1:end-1)) .* detectionRate(2:end));
+
+if ~isempty(col)
+ h=plot(falseAlarmRate, detectionRate, [col '-']);
+ %set(h, 'linewidth', 2);
+ e = 0.05;
+ if scale01
+ axis([0-e 1+e 0-e 1+e])
+ else
+ % zoom in on the top left corner
+ axis([0-e 0.5+e 0.5-e 1+e])
+ end
+ grid on
+ ylabel('detection rate')
+ xlabel('false alarm rate')
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plot_axis_thru_origin.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_axis_thru_origin.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function plot_axis_thru_origin()
+% PLOT_AXIS_THRU_ORIGIN Draw a horizontal and a vertical line through (0,0)
+% spanning the current axis limits of the current axes.
+
+lnx=line(get(gca,'xlim'),[0 0]); lny=line([0 0],get(gca,'ylim'));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plot_ellipse.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_ellipse.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+% PLOT_ELLIPSE
+% h=plot_ellipse(x,y,theta,a,b)
+%
+% This routine plots an ellipse with centre (x,y), axis lengths a,b
+% with major axis at an angle of theta radians from the horizontal.
+
+%
+% Author: P. Fieguth
+% Jan. 98
+%
+%http://ocho.uwaterloo.ca/~pfieguth/Teaching/372/plot_ellipse.m
+
+function h=plot_ellipse(x,y,theta,a,b)
+
+np = 100; % number of boundary points
+ang = [0:np]*2*pi/np;
+% Rotate an axis-aligned ellipse (radii a, b) by theta, then translate to (x,y).
+R = [cos(theta) -sin(theta); sin(theta) cos(theta)];
+pts = [x;y]*ones(size(ang)) + R*[cos(ang)*a; sin(ang)*b];
+h=plot( pts(1,:), pts(2,:) );
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plot_matrix.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_matrix.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function plot_matrix(G, bw)
+% PLOT_MATRIX Plot a 2D matrix as a grayscale image, and label the axes
+%
+% plot_matrix(M)
+%
+% For 0/1 matrices (eg. adjacency matrices), use
+% plot_matrix(M,1)
+
+if nargin < 2, bw = 0; end
+
+% The 'if 0' branch is a retained earlier attempt; only the 'else' branch runs.
+if 0
+ imagesc(G)
+ %image(G)
+ %colormap([1 1 1; 0 0 0]); % black squares on white background
+ %colormap(gray)
+ grid on
+ n = length(G);
+
+ % shift the grid lines so they don't intersect the squares
+ set(gca,'xtick',1.5:1:n);
+ set(gca,'ytick',1.5:1:n);
+
+ % Turn off the confusing labels, which are fractional
+ % Ideally we could shift the labels to lie between the axis lines...
+% set(gca,'xticklabel', []);
+% set(gca,'yticklabel', []);
+else
+ % solution provided by Jordan Rosenthal
+ % You can plot the grid lines manually:
+ % This uses the trick that a point with a value nan does not get plotted.
+ imagesc(G);
+ if bw
+ colormap([1 1 1; 0 0 0]);
+ end
+ n = length(G);
+ % Build the vertical grid-line segments, with NaN breaks between them.
+ x = 1.5:1:n;
+ x = [ x; x; repmat(nan,1,n-1) ];
+ y = [ 0.5 n+0.5 nan ].';
+ y = repmat(y,1,n-1);
+ x = x(:);
+ y = y(:);
+ line(x,y,'linestyle',':','color','k');
+ % Swapping x and y draws the horizontal grid lines.
+ line(y,x,'linestyle',':','color','k');
+ set(gca,'xtick',1:n)
+ set(gca,'ytick',1:n)
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plot_polygon.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_polygon.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function out=plot_polygon(p, args, close_loop)
+% PLOT_POLYGON
+% function handle=plot_polygon(p, args, close_loop)
+% p(1,i), p(2,i) are the x/y coords of point i.
+% If non-empty, args are passed thru to the plot command.
+% If close_loop = 1, connect the last point to the first
+
+% All rights reserved. Documentation updated April 1999.
+% Matt Kawski. http://math.la.asu.edu/~kawski
+% He calls it pplot
+
+if nargin < 2, args = []; end
+if nargin < 3, close_loop = 0; end
+
+% Append the first vertex so the plotted path returns to its start.
+if close_loop
+ p = [p p(:,1)];
+end
+
+if isempty(args)
+ out=plot(p(1,:),p(2,:));
+else
+ out=plot(p(1,:),p(2,:),args);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotcov2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotcov2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+% PLOTCOV2 - Plots a covariance ellipse with major and minor axes
+% for a bivariate Gaussian distribution.
+%
+% Usage:
+% h = plotcov2(mu, Sigma[, OPTIONS]);
+%
+% Inputs:
+% mu - a 2 x 1 vector giving the mean of the distribution.
+% Sigma - a 2 x 2 symmetric positive semi-definite matrix giving
+% the covariance of the distribution (or the zero matrix).
+%
+% Options:
+% 'conf' - a scalar between 0 and 1 giving the confidence
+% interval (i.e., the fraction of probability mass to
+% be enclosed by the ellipse); default is 0.9.
+% 'num-pts' - the number of points to be used to plot the
+% ellipse; default is 100.
+%
+% This function also accepts options for PLOT.
+%
+% Outputs:
+% h - a vector of figure handles to the ellipse boundary and
+% its major and minor axes
+%
+% See also: PLOTCOV3
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function h = plotcov2(mu, Sigma, varargin)
+
+% NOTE(review): an elementwise ~= inside IF only triggers the error when
+% EVERY element differs, so some wrong sizes can slip through this check.
+if size(Sigma) ~= [2 2], error('Sigma must be a 2 by 2 matrix'); end
+if length(mu) ~= 2, error('mu must be a 2 by 1 vector'); end
+
+[p, ...
+ n, ...
+ plot_opts] = process_options(varargin, 'conf', 0.9, ...
+ 'num-pts', 100);
+h = [];
+holding = ishold;
+if (Sigma == zeros(2, 2))
+ z = mu; % degenerate case: all probability mass at the mean
+else
+ % Compute the Mahalanobis radius of the ellipsoid that encloses
+ % the desired probability mass.
+ k = conf2mahal(p, 2);
+ % The major and minor axes of the covariance ellipse are given by
+ % the eigenvectors of the covariance matrix. Their lengths (for
+ % the ellipse with unit Mahalanobis radius) are given by the
+ % square roots of the corresponding eigenvalues.
+ if (issparse(Sigma))
+ [V, D] = eigs(Sigma);
+ else
+ [V, D] = eig(Sigma);
+ end
+ % Compute the points on the surface of the ellipse.
+ t = linspace(0, 2*pi, n);
+ u = [cos(t); sin(t)];
+ w = (k * V * sqrt(D)) * u;
+ z = repmat(mu, [1 n]) + w;
+ % Plot the major and minor axes.
+ L = k * sqrt(diag(D));
+ h = plot([mu(1); mu(1) + L(1) * V(1, 1)], ...
+ [mu(2); mu(2) + L(1) * V(2, 1)], plot_opts{:});
+ hold on;
+ h = [h; plot([mu(1); mu(1) + L(2) * V(1, 2)], ...
+ [mu(2); mu(2) + L(2) * V(2, 2)], plot_opts{:})];
+end
+
+% Plot the ellipse boundary itself; restore the caller's hold state.
+h = [h; plot(z(1, :), z(2, :), plot_opts{:})];
+if (~holding) hold off; end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotcov2New.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotcov2New.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,119 @@
+% PLOTCOV2 - Plots a covariance ellipsoid with axes for a bivariate
+% Gaussian distribution.
+%
+% Usage:
+% [h, s] = plotcov2(mu, Sigma[, OPTIONS]);
+%
+% Inputs:
+% mu - a 2 x 1 vector giving the mean of the distribution.
+% Sigma - a 2 x 2 symmetric positive semi-definite matrix giving
+% the covariance of the distribution (or the zero matrix).
+%
+% Options:
+% 'conf' - a scalar between 0 and 1 giving the confidence
+% interval (i.e., the fraction of probability mass to
+% be enclosed by the ellipse); default is 0.9.
+% 'num-pts' - if the value supplied is n, then (n + 1)^2 points
+% to be used to plot the ellipse; default is 20.
+% 'label' - if non-empty, a string that will label the
+% ellipsoid (default: [])
+% 'plot-axes' - a 0/1 flag indicating if the ellipsoid's axes
+% should be plotted (default: 1)
+% 'plot-opts' - a cell vector of arguments to be handed to PLOT3
+% to contol the appearance of the axes, e.g.,
+% {'Color', 'g', 'LineWidth', 1}; the default is {}
+% 'fill-color' - a color specifier; is this is not [], the
+% covariance ellipse is filled with this color
+% (default: [])
+%
+% Outputs:
+% h - a vector of handles on the axis lines
+%
+% See also: PLOTCOV3
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [h, s] = plotcov2New(mu, Sigma, varargin)
+
+h = [];
+s = [];
+
+% NOTE(review): an elementwise ~= inside IF only errors when all elements
+% differ; a partially-wrong size can slip through.
+if size(Sigma) ~= [2 2], error('Sigma must be a 2 by 2 matrix'); end
+if length(mu) ~= 2, error('mu must be a 2 by 1 vector'); end
+
+% checkpsd is a companion KPMtools helper (presumably projects/validates
+% Sigma as positive semi-definite -- TODO confirm).
+Sigma = checkpsd(Sigma);
+
+[p, ...
+ n, ...
+ label, ...
+ plot_axes, ...
+ plot_opts, ...
+ fill_color] = process_options(varargin, 'conf', 0.9, ...
+ 'num-pts', 20, ...
+ 'label', [], ...
+ 'plot-axes', 1, ...
+ 'plot-opts', {}, ...
+ 'fill-color', []);
+holding = ishold;
+% Compute the Mahalanobis radius of the ellipsoid that encloses
+% the desired probability mass.
+k = conf2mahal(p, 2);
+% Scale the covariance matrix so the confidence region has unit
+% Mahalanobis distance.
+Sigma = Sigma * k;
+% The axes of the covariance ellipse are given by the eigenvectors of
+% the covariance matrix. Their lengths (for the ellipse with unit
+% Mahalanobis radius) are given by the square roots of the
+% corresponding eigenvalues.
+[V, D] = eig(full(Sigma));
+% Discard tiny imaginary/negative parts caused by numerical error.
+V = real(V);
+D = real(D);
+D = abs(D);
+
+% Compute the points on the boundary of the ellipsoid.
+t = linspace(0, 2*pi, n);
+u = [cos(t(:))'; sin(t(:))'];
+w = (V * sqrt(D)) * u;
+z = repmat(mu(:), [1 n]) + w;
+h = [h; plot(z(1, :), z(2, :), plot_opts{:})];
+if (~isempty(fill_color))
+ s = patch(z(1, :), z(2, :), fill_color);
+end
+
+% Plot the axes.
+if (plot_axes)
+ hold on;
+ L = sqrt(diag(D));
+ h = plot([mu(1); mu(1) + L(1) * V(1, 1)], ...
+ [mu(2); mu(2) + L(1) * V(2, 1)], plot_opts{:});
+ h = [h; plot([mu(1); mu(1) + L(2) * V(1, 2)], ...
+ [mu(2); mu(2) + L(2) * V(2, 2)], plot_opts{:})];
+end
+
+
+if (~isempty(label))
+ th = text(mu(1), mu(2), label);
+ set(th, 'FontSize', 18);
+ set(th, 'FontName', 'Times');
+ set(th, 'FontWeight', 'bold');
+ set(th, 'FontAngle', 'italic');
+ set(th, 'HorizontalAlignment', 'center');
+end
+
+if (~holding & plot_axes) hold off; end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotcov3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotcov3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,109 @@
+% PLOTCOV3 - Plots a covariance ellipsoid with axes for a trivariate
+% Gaussian distribution.
+%
+% Usage:
+% [h, s] = plotcov3(mu, Sigma[, OPTIONS]);
+%
+% Inputs:
+% mu - a 3 x 1 vector giving the mean of the distribution.
+% Sigma - a 3 x 3 symmetric positive semi-definite matrix giving
+% the covariance of the distribution (or the zero matrix).
+%
+% Options:
+% 'conf' - a scalar between 0 and 1 giving the confidence
+% interval (i.e., the fraction of probability mass to
+% be enclosed by the ellipse); default is 0.9.
+% 'num-pts' - if the value supplied is n, then (n + 1)^2 points
+% to be used to plot the ellipse; default is 20.
+% 'plot-opts' - a cell vector of arguments to be handed to PLOT3
+% to contol the appearance of the axes, e.g.,
+% {'Color', 'g', 'LineWidth', 1}; the default is {}
+% 'surf-opts' - a cell vector of arguments to be handed to SURF
+% to contol the appearance of the ellipsoid
+% surface; a nice possibility that yields
+% transparency is: {'EdgeAlpha', 0, 'FaceAlpha',
+% 0.1, 'FaceColor', 'g'}; the default is {}
+%
+% Outputs:
+% h - a vector of handles on the axis lines
+% s - a handle on the ellipsoid surface object
+%
+% See also: PLOTCOV2
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [h, s] = plotcov3(mu, Sigma, varargin)
+
+% NOTE(review): elementwise ~= inside IF only errors when all elements differ.
+if size(Sigma) ~= [3 3], error('Sigma must be a 3 by 3 matrix'); end
+if length(mu) ~= 3, error('mu must be a 3 by 1 vector'); end
+
+[p, ...
+ n, ...
+ plot_opts, ...
+ surf_opts] = process_options(varargin, 'conf', 0.9, ...
+ 'num-pts', 20, ...
+ 'plot-opts', {}, ...
+ 'surf-opts', {});
+h = [];
+holding = ishold;
+if (Sigma == zeros(3, 3))
+ z = mu; % degenerate case: all probability mass at the mean
+else
+ % Compute the Mahalanobis radius of the ellipsoid that encloses
+ % the desired probability mass.
+ k = conf2mahal(p, 3);
+ % The axes of the covariance ellipse are given by the eigenvectors of
+ % the covariance matrix. Their lengths (for the ellipse with unit
+ % Mahalanobis radius) are given by the square roots of the
+ % corresponding eigenvalues.
+ if (issparse(Sigma))
+ [V, D] = eigs(Sigma);
+ else
+ [V, D] = eig(Sigma);
+ end
+ if (any(diag(D) < 0))
+ error('Invalid covariance matrix: not positive semi-definite.');
+ end
+ % Compute the points on the surface of the ellipsoid.
+ t = linspace(0, 2*pi, n); % NOTE(review): unused; sphere(n) below supplies the grid
+ [X, Y, Z] = sphere(n);
+ u = [X(:)'; Y(:)'; Z(:)'];
+ w = (k * V * sqrt(D)) * u;
+ z = repmat(mu(:), [1 (n + 1)^2]) + w;
+
+ % Plot the axes.
+ L = k * sqrt(diag(D));
+ h = plot3([mu(1); mu(1) + L(1) * V(1, 1)], ...
+ [mu(2); mu(2) + L(1) * V(2, 1)], ...
+ [mu(3); mu(3) + L(1) * V(3, 1)], plot_opts{:});
+ hold on;
+ h = [h; plot3([mu(1); mu(1) + L(2) * V(1, 2)], ...
+ [mu(2); mu(2) + L(2) * V(2, 2)], ...
+ [mu(3); mu(3) + L(2) * V(3, 2)], plot_opts{:})];
+ h = [h; plot3([mu(1); mu(1) + L(3) * V(1, 3)], ...
+ [mu(2); mu(2) + L(3) * V(2, 3)], ...
+ [mu(3); mu(3) + L(3) * V(3, 3)], plot_opts{:})];
+end
+
+% Draw the ellipsoid surface: reshape the point list back onto the
+% (n+1)-by-(n+1) sphere grid.
+s = surf(reshape(z(1, :), [(n + 1) (n + 1)]), ...
+ reshape(z(2, :), [(n + 1) (n + 1)]), ...
+ reshape(z(3, :), [(n + 1) (n + 1)]), ...
+ surf_opts{:});
+
+if (~holding) hold off; end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotgauss1d.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotgauss1d.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function h = plotgauss1d(mu, sigma2)
+% function h = plotgauss1d(mu, sigma^2)
+% Plot the Gaussian density with mean mu and VARIANCE sigma2
+% over the range mu +/- 3 standard deviations.
+% Example
+% plotgauss1d(0,5); hold on; h=plotgauss1d(0,2);set(h,'color','r')
+
+sigma = sqrt(sigma2);
+x = linspace(mu-3*sigma, mu+3*sigma, 100);
+p = gaussian_prob(x, mu, sigma2); % gaussian_prob is a KPMtools helper
+h = plot(x, p, '-');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,130 @@
+function h=plotgauss2d(mu, Sigma)
+% PLOTGAUSS2D Plot a 2D Gaussian as an ellipse with optional cross hairs
+% h=plotgauss2(mu, Sigma)
+%
+% NOTE(review): this call resolves to the plotcov2 SUBFUNCTION defined later
+% in this file, which shadows the standalone plotcov2.m.
+
+h = plotcov2(mu, Sigma);
+return;
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+
+% PLOTCOV2 - Plots a covariance ellipse with major and minor axes
+% for a bivariate Gaussian distribution.
+%
+% Usage:
+% h = plotcov2(mu, Sigma[, OPTIONS]);
+%
+% Inputs:
+% mu - a 2 x 1 vector giving the mean of the distribution.
+% Sigma - a 2 x 2 symmetric positive semi-definite matrix giving
+% the covariance of the distribution (or the zero matrix).
+%
+% Options:
+% 'conf' - a scalar between 0 and 1 giving the confidence
+% interval (i.e., the fraction of probability mass to
+% be enclosed by the ellipse); default is 0.9.
+% 'num-pts' - the number of points to be used to plot the
+% ellipse; default is 100.
+%
+% This function also accepts options for PLOT.
+%
+% Outputs:
+% h - a vector of figure handles to the ellipse boundary and
+% its major and minor axes
+%
+% See also: PLOTCOV3
+
+% Copyright (C) 2002 Mark A. Paskin
+
+function h = plotcov2(mu, Sigma, varargin)
+% Subfunction copy of Paskin's plotcov2 (see plotcov2.m); kept local so
+% plotgauss2d.m is self-contained.
+
+% NOTE(review): elementwise ~= inside IF only errors when all elements differ.
+if size(Sigma) ~= [2 2], error('Sigma must be a 2 by 2 matrix'); end
+if length(mu) ~= 2, error('mu must be a 2 by 1 vector'); end
+
+[p, ...
+ n, ...
+ plot_opts] = process_options(varargin, 'conf', 0.9, ...
+ 'num-pts', 100);
+h = [];
+holding = ishold;
+if (Sigma == zeros(2, 2))
+ z = mu; % degenerate case: all probability mass at the mean
+else
+ % Compute the Mahalanobis radius of the ellipsoid that encloses
+ % the desired probability mass.
+ k = conf2mahal(p, 2);
+ % The major and minor axes of the covariance ellipse are given by
+ % the eigenvectors of the covariance matrix. Their lengths (for
+ % the ellipse with unit Mahalanobis radius) are given by the
+ % square roots of the corresponding eigenvalues.
+ if (issparse(Sigma))
+ [V, D] = eigs(Sigma);
+ else
+ [V, D] = eig(Sigma);
+ end
+ % Compute the points on the surface of the ellipse.
+ t = linspace(0, 2*pi, n);
+ u = [cos(t); sin(t)];
+ w = (k * V * sqrt(D)) * u;
+ z = repmat(mu, [1 n]) + w;
+ % Plot the major and minor axes.
+ L = k * sqrt(diag(D));
+ h = plot([mu(1); mu(1) + L(1) * V(1, 1)], ...
+ [mu(2); mu(2) + L(1) * V(2, 1)], plot_opts{:});
+ hold on;
+ h = [h; plot([mu(1); mu(1) + L(2) * V(1, 2)], ...
+ [mu(2); mu(2) + L(2) * V(2, 2)], plot_opts{:})];
+end
+
+h = [h; plot(z(1, :), z(2, :), plot_opts{:})];
+if (~holding) hold off; end
+
+%%%%%%%%%%%%
+
+% CONF2MAHAL - Translates a confidence interval to a Mahalanobis
+% distance. Consider a multivariate Gaussian
+% distribution of the form
+%
+% p(x) = 1/sqrt((2 * pi)^d * det(C)) * exp((-1/2) * MD(x, m, inv(C)))
+%
+% where MD(x, m, P) is the Mahalanobis distance from x
+% to m under P:
+%
+% MD(x, m, P) = (x - m) * P * (x - m)'
+%
+% A particular Mahalanobis distance k identifies an
+% ellipsoid centered at the mean of the distribution.
+% The confidence interval associated with this ellipsoid
+% is the probability mass enclosed by it. Similarly,
+% a particular confidence interval uniquely determines
+% an ellipsoid with a fixed Mahalanobis distance.
+%
+% If X is an d dimensional Gaussian-distributed vector,
+% then the Mahalanobis distance of X is distributed
+% according to the Chi-squared distribution with d
+% degrees of freedom. Thus, the Mahalanobis distance is
+% determined by evaluating the inverse cumulative
+% distribution function of the chi squared distribution
+% up to the confidence value.
+%
+% Usage:
+%
+% m = conf2mahal(c, d);
+%
+% Inputs:
+%
+% c - the confidence interval
+% d - the number of dimensions of the Gaussian distribution
+%
+% Outputs:
+%
+% m - the Mahalanobis radius of the ellipsoid enclosing the
+% fraction c of the distribution's probability mass
+%
+% See also: MAHAL2CONF
+
+% Copyright (C) 2002 Mark A. Paskin
+
+function m = conf2mahal(c, d)
+% CONF2MAHAL Squared Mahalanobis radius enclosing probability mass c in d dims
+% (inverse chi-squared CDF with d degrees of freedom; see header above).
+
+m = chi2inv(c, d); % matlab stats toolbox
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d_old.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d_old.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function h=plotgauss2d_old(mu, Sigma, plot_cross)
+% PLOTGAUSS2D Plot a 2D Gaussian as an ellipse with optional cross hairs
+% h=plotgauss2(mu, Sigma)
+%
+% h=plotgauss2(mu, Sigma, 1) also plots the major and minor axes
+%
+% Example
+% clf; S=[2 1; 1 2]; plotgauss2d([0;0], S, 1); axis equal
+
+if nargin < 3, plot_cross = 0; end
+[V,D]=eig(Sigma);
+lam1 = D(1,1);
+lam2 = D(2,2);
+v1 = V(:,1);
+v2 = V(:,2);
+%assert(approxeq(v1' * v2, 0))
+% Angle of the first eigenvector from the horizontal.
+if v1(1)==0
+ theta = 0; % horizontal
+else
+ theta = atan(v1(2)/v1(1));
+end
+% Axis lengths are the square roots of the eigenvalues.
+a = sqrt(lam1);
+b = sqrt(lam2);
+h=plot_ellipse(mu(1), mu(2), theta, a,b);
+
+if plot_cross
+ mu = mu(:);
+ held = ishold;
+ hold on
+ % Cross hairs along the two eigenvector directions, scaled by a and b.
+ minor1 = mu-a*v1; minor2 = mu+a*v1;
+ hminor = line([minor1(1) minor2(1)], [minor1(2) minor2(2)]);
+
+ major1 = mu-b*v2; major2 = mu+b*v2;
+ hmajor = line([major1(1) major2(1)], [major1(2) major2(2)]);
+ %set(hmajor,'color','r')
+ if ~held
+ hold off
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/polygon_area.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/polygon_area.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function a = polygon_area(x,y)
+% AREA Area of a planar polygon.
+% AREA(X,Y) Calculates the area of a 2-dimensional
+% polygon formed by vertices with coordinate vectors
+% X and Y. The result is direction-sensitive: the
+% area is positive if the bounding contour is counter-
+% clockwise and negative if it is clockwise.
+%
+% See also TRAPZ.
+
+% Copyright (c) 1995 by Kirill K. Pankratov,
+% kirill@plume.mit.edu.
+% 04/20/94, 05/20/95
+
+ % Make polygon closed .............
+x = [x(:); x(1)];
+y = [y(:); y(1)];
+
+ % Calculate contour integral Int -y*dx (same as Int x*dy).
+lx = length(x);
+a = -(x(2:lx)-x(1:lx-1))'*(y(1:lx-1)+y(2:lx))/2;
+% NOTE(review): abs() makes the result orientation-INsensitive, contradicting
+% the "direction-sensitive" claim in the header comment above.
+a = abs(a);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/polygon_centroid.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/polygon_centroid.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,79 @@
+function [x0,y0] = centroid(x,y)
+% CENTROID Center of mass of a polygon.
+% [X0,Y0] = CENTROID(X,Y) Calculates centroid
+% (center of mass) of planar polygon with vertices
+% coordinates X, Y.
+% Z0 = CENTROID(X+i*Y) returns Z0=X0+i*Y0 the same
+% as CENTROID(X,Y).
+
+% Copyright (c) 1995 by Kirill K. Pankratov,
+% kirill@plume.mit.edu.
+% 06/01/95, 06/07/95
+
+% Algorithm:
+% X0 = Int{x*ds}/Int{ds}, where ds - area element
+% so that Int{ds} is total area of a polygon.
+% Using Green's theorem the area integral can be
+% reduced to a contour integral:
+% Int{x*ds} = -Int{x^2*dy}, Int{ds} = Int{x*dy} along
+% the perimeter of a polygon.
+% For a polygon as a sequence of line segments
+% this can be reduced exactly to a sum:
+% Int{x^2*dy} = Sum{ (x_{i}^2+x_{i+1}^2+x_{i}*x_{i+1})*
+% (y_{i+1}-y_{i})}/3;
+% Int{x*dy} = Sum{(x_{i}+x_{i+1})(y_{i+1}-y_{i})}/2.
+% Similarly
+% Y0 = Int{y*ds}/Int{ds}, where
+% Int{y*ds} = Int{y^2*dx} =
+% = Sum{ (y_{i}^2+y_{i+1}^2+y_{i}*y_{i+1})*
+% (x_{i+1}-x_{i})}/3.
+
+ % Handle input ......................
+if nargin==0, help centroid, return, end
+if nargin==1
+ sz = size(x);
+ if sz(1)==2 % Matrix 2 by n
+ y = x(2,:); x = x(1,:);
+ elseif sz(2)==2 % Matrix n by 2
+ y = x(:,2); x = x(:,1);
+ else
+ y = imag(x);
+ x = real(x);
+ end
+end
+
+ % Make a polygon closed ..............
+x = [x(:); x(1)];
+y = [y(:); y(1)];
+
+ % Check length .......................
+l = length(x);
+if length(y)~=l
+ error(' Vectors x and y must have the same length')
+end
+
+ % X-mean: Int{x^2*dy} ................
+del = y(2:l)-y(1:l-1);
+v = x(1:l-1).^2+x(2:l).^2+x(1:l-1).*x(2:l);
+x0 = v'*del;
+
+ % Y-mean: Int{y^2*dx} ................
+del = x(2:l)-x(1:l-1);
+v = y(1:l-1).^2+y(2:l).^2+y(1:l-1).*y(2:l);
+y0 = v'*del;
+
+ % Calculate area: Int{y*dx} ..........
+a = (y(1:l-1)+y(2:l))'*del;
+tol= 2*eps;
+if abs(a) 1
+ exportfig(h, filename, varargin{:}, args{:});
+else
+ exportfig(h, filename, args{:});
+end
+
+X = imread(filename,'png');
+height = size(X,1);
+width = size(X,2);
+delete(filename);
+f = figure( 'Name', 'Preview', ...
+ 'Menubar', 'none', ...
+ 'NumberTitle', 'off', ...
+ 'Visible', 'off');
+image(X);
+axis image;
+ax = findobj(f, 'type', 'axes');
+axesPos = [0 0 width height];
+set(ax, 'Units', 'pixels', ...
+ 'Position', axesPos, ...
+ 'Visible', 'off');
+figPos = get(f,'Position');
+rootSize = get(0,'ScreenSize');
+figPos(3:4) = axesPos(3:4);
+if figPos(1) + figPos(3) > rootSize(3)
+ figPos(1) = rootSize(3) - figPos(3) - 50;
+end
+if figPos(2) + figPos(4) > rootSize(4)
+ figPos(2) = rootSize(4) - figPos(4) - 50;
+end
+set(f, 'Position',figPos, ...
+ 'Visible', 'on');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/process_options.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/process_options.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,132 @@
+% PROCESS_OPTIONS - Processes options passed to a Matlab function.
+% This function provides a simple means of
+% parsing attribute-value options. Each option is
+% named by a unique string and is given a default
+% value.
+%
+% Usage: [var1, var2, ..., varn[, unused]] = ...
+% process_options(args, ...
+% str1, def1, str2, def2, ..., strn, defn)
+%
+% Arguments:
+% args - a cell array of input arguments, such
+% as that provided by VARARGIN. Its contents
+% should alternate between strings and
+% values.
+% str1, ..., strn - Strings that are associated with a
+% particular variable
+% def1, ..., defn - Default values returned if no option
+% is supplied
+%
+% Returns:
+% var1, ..., varn - values to be assigned to variables
+% unused - an optional cell array of those
+% string-value pairs that were unused;
+% if this is not supplied, then a
+% warning will be issued for each
+% option in args that lacked a match.
+%
+% Examples:
+%
+% Suppose we wish to define a Matlab function 'func' that has
+% required parameters x and y, and optional arguments 'u' and 'v'.
+% With the definition
+%
+% function y = func(x, y, varargin)
+%
+% [u, v] = process_options(varargin, 'u', 0, 'v', 1);
+%
+% calling func(0, 1, 'v', 2) will assign 0 to x, 1 to y, 0 to u, and 2
+% to v. The parameter names are insensitive to case; calling
+% func(0, 1, 'V', 2) has the same effect. The function call
+%
+% func(0, 1, 'u', 5, 'z', 2);
+%
+% will result in u having the value 5 and v having value 1, but
+% will issue a warning that the 'z' option has not been used. On
+% the other hand, if func is defined as
+%
+% function y = func(x, y, varargin)
+%
+% [u, v, unused_args] = process_options(varargin, 'u', 0, 'v', 1);
+%
+% then the call func(0, 1, 'u', 5, 'z', 2) will yield no warning,
+% and unused_args will have the value {'z', 2}. This behaviour is
+% useful for functions with options that invoke other functions
+% with options; all options can be passed to the outer function and
+% its unprocessed arguments can be passed to the inner function.
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [varargout] = process_options(args, varargin)
+
+% Check the number of input arguments
+n = length(varargin);
+if (mod(n, 2))
+ error('Each option must be a string/value pair.');
+end
+
+% Check the number of supplied output arguments
+% (one extra output means the caller wants the unused string/value pairs
+% returned instead of warned about -- see header comment above).
+if (nargout < (n / 2))
+ error('Insufficient number of output arguments given');
+elseif (nargout == (n / 2))
+ warn = 1;
+ nout = n / 2;
+else
+ warn = 0;
+ nout = n / 2 + 1;
+end
+
+% Set outputs to be defaults
+varargout = cell(1, nout);
+for i=2:2:n
+ varargout{i/2} = varargin{i};
+end
+
+% Now process all arguments
+nunused = 0;
+for i=1:2:length(args)
+ found = 0;
+ % Case-insensitive match of the caller's option name against the declared names.
+ for j=1:2:n
+ if strcmpi(args{i}, varargin{j})
+ varargout{(j + 1)/2} = args{i + 1};
+ found = 1;
+ break;
+ end
+ end
+ if (~found)
+ if (warn)
+ warning(sprintf('Option ''%s'' not used.', args{i}));
+ args{i} % NOTE(review): unsuppressed expression echoes the name; looks like debug leftover
+ else
+ nunused = nunused + 1;
+ unused{2 * nunused - 1} = args{i};
+ unused{2 * nunused} = args{i + 1};
+ end
+ end
+end
+
+% Assign the unused arguments
+if (~warn)
+ if (nunused)
+ varargout{nout} = unused;
+ else
+ varargout{nout} = cell(0);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rand_psd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rand_psd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function M = rand_psd(d, d2, k)
+% RAND_PSD Generate k random d-by-d positive (semi-)definite matrices.
+% M = rand_psd(d, d2, k)   (d2 must equal d; k defaults to 1)
+%
+% Each slice M(:,:,i) is R*R' for a fresh uniform random R = rand(d),
+% which is symmetric and positive semi-definite by construction.
+
+if nargin < 2, d2 = d; end
+if nargin < 3, k = 1; end
+if d2 ~= d, error('must be square'); end
+
+M = zeros(d, d, k);
+for slice = 1:k
+  R = rand(d);
+  M(:,:,slice) = R * R';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintC.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintC.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [overlap, normoverlap] = rectintC(A,B)
+%
+% A(i,:) = [x y w h]
+% B(j,:) = [x y w h]
+% overlap(i,j) = area of intersection
+% normoverlap(i,j) = overlap(i,j) / min(area(i), area(j))
+%
+% Same as built-in rectint, but faster and uses less memory (since avoids repmat).
+
+
+% Convert [x y w h] to edge coordinates. A's edges stay column vectors,
+% B's are transposed to rows; the compiled loop indexes each independently.
+leftA = A(:,1);
+bottomA = A(:,2);
+rightA = leftA + A(:,3);
+topA = bottomA + A(:,4);
+
+leftB = B(:,1)';
+bottomB = B(:,2)';
+rightB = leftB + B(:,3)';
+topB = bottomB + B(:,4)';
+
+verbose = 0;
+% All the pairwise work happens in the compiled MEX helper rectintLoopC.
+[overlap, normoverlap] = rectintLoopC(leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB, verbose);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+
+#include "mex.h"
+#include <stdio.h>  /* printf; header name apparently lost in extraction */
+
+#define MAX(x,y) ((x)>(y) ? (x) : (y))
+#define MIN(x,y) ((x)<(y) ? (x) : (y))
+
+/* Pairwise rectangle-intersection kernel behind rectintC.m.
+ * prhs: leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB
+ * (edge-coordinate vectors) and a scalar verbose flag.
+ * plhs[0]: m x n matrix of intersection areas.
+ * plhs[1]: same areas normalized by min(areaA, areaB). */
+void mexFunction(
+        int nlhs, mxArray *plhs[],
+        int nrhs, const mxArray *prhs[]
+        )
+{
+  /* nzmax, irs, jcs, irs2, jcs2 are never used here; they look like
+     leftovers from the sparse variant (rectintSparseLoopC.c). */
+  int j,k,m,n,nzmax,*irs,*jcs, *irs2, *jcs2;
+  double *overlap, *overlap2, tmp, areaA, areaB;
+  double *leftA, *rightA, *topA, *bottomA;
+  double *leftB, *rightB, *topB, *bottomB;
+  double *verbose;
+
+  /* m = number of A rectangles, n = number of B rectangles; MAX of the
+     two dims accepts either row or column vectors. */
+  m = MAX(mxGetM(prhs[0]), mxGetN(prhs[0]));
+  n = MAX(mxGetM(prhs[4]), mxGetN(prhs[4]));
+  /* printf("A=%d, B=%d\n", m, n); */
+
+  leftA = mxGetPr(prhs[0]);
+  rightA = mxGetPr(prhs[1]);
+  topA = mxGetPr(prhs[2]);
+  bottomA = mxGetPr(prhs[3]);
+
+  leftB = mxGetPr(prhs[4]);
+  rightB = mxGetPr(prhs[5]);
+  topB = mxGetPr(prhs[6]);
+  bottomB = mxGetPr(prhs[7]);
+
+  verbose = mxGetPr(prhs[8]);
+
+  plhs[0] = mxCreateDoubleMatrix(m,n, mxREAL);
+  overlap = mxGetPr(plhs[0]);
+
+  plhs[1] = mxCreateDoubleMatrix(m,n, mxREAL);
+  overlap2 = mxGetPr(plhs[1]);
+
+  /* k walks both output arrays in column-major order, matching MATLAB's
+     memory layout; outputs were zero-initialized by mxCreateDoubleMatrix. */
+  k = 0;
+  for (j = 0; (j < n); j++) {
+    int i;
+    for (i = 0; (i < m); i++) {
+      /* Intersection width times height; MAX(0,...) clips disjoint pairs. */
+      tmp = (MAX(0, MIN(rightA[i], rightB[j]) - MAX(leftA[i], leftB[j]) )) *
+            (MAX(0, MIN(topA[i], topB[j]) - MAX(bottomA[i], bottomB[j]) ));
+
+      if (tmp > 0) {
+        overlap[k] = tmp;
+
+        areaA = (rightA[i]-leftA[i])*(topA[i]-bottomA[i]);
+        areaB = (rightB[j]-leftB[j])*(topB[j]-bottomB[j]);
+        overlap2[k] = tmp/MIN(areaA, areaB);
+
+        if (*verbose) {
+          printf("j=%d,i=%d,overlap=%5.3f, norm=%5.3f\n", j,i, overlap[k], overlap2[k]);
+        }
+      }
+
+      k++;
+    }
+  }
+}
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.dll
Binary file toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.dll has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintSparse.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintSparse.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [overlap, normoverlap] = rectintSparse(A,B)
+%
+% A(i,:) = [x y w h]
+% B(j,:) = [x y w h]
+% overlap(i,j) = area of intersection
+% normoverlap(i,j) = overlap(i,j) / min(area(i), area(j))
+%
+% Same as built-in rectint, but uses less memory.
+% Use rectintSparseC for a faster version.
+%
+
+% Convert [x y w h] to edge coordinates.
+leftA = A(:,1);
+bottomA = A(:,2);
+rightA = leftA + A(:,3);
+topA = bottomA + A(:,4);
+
+leftB = B(:,1)';
+bottomB = B(:,2)';
+rightB = leftB + B(:,3)';
+topB = bottomB + B(:,4)';
+
+numRectA = size(A,1);
+numRectB = size(B,1);
+
+%out = rectintSparseLoopC(leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB);
+
+% Preallocate assuming ~20% non-zeroes. The variable is called nzguess
+% rather than nnz: the original shadowed the builtin NNZ function.
+nzguess = ceil(0.2*numRectA*numRectB);
+overlap = sparse([], [], [], numRectA, numRectB, nzguess);
+normoverlap = sparse([], [], [], numRectA, numRectB, nzguess);
+for j=1:numRectB
+  for i=1:numRectA
+    % Clipped width * clipped height; zero when the pair is disjoint.
+    tmp = (max(0, min(rightA(i), rightB(j)) - max(leftA(i), leftB(j)) ) ) .* ...
+          (max(0, min(topA(i), topB(j)) - max(bottomA(i), bottomB(j)) ) );
+    if tmp>0
+      overlap(i,j) = tmp;
+      areaA = (rightA(i)-leftA(i))*(topA(i)-bottomA(i));
+      areaB = (rightB(j)-leftB(j))*(topB(j)-bottomB(j));
+      normoverlap(i,j) = min(tmp/areaA, tmp/areaB);
+    end
+  end
+end
+
+
+if 0  % dead self-test / benchmark snippet, kept for reference
+N = size(bboxDense01,2); % 1000;
+rect = bboxToRect(bboxDense01)';
+A = rect(1:2,:);
+B = rect(1:N,:);
+
+tic; out1 = rectint(A, B); toc
+tic; out2 = rectintSparse(A, B); toc
+tic; out3 = rectintSparseC(A, B); toc
+tic; out4 = rectintC(A, B); toc
+assert(approxeq(out1, out2))
+assert(approxeq(out1, full(out3)))
+assert(approxeq(out1, out4))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseC.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseC.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function [overlap, normoverlap] = rectintSparseC(A,B)
+%
+% function [area, normarea] = rectintSparseC(A,B)
+% A(i,:) = [x y w h]
+% B(j,:) = [x y w h]
+% out(i,j) = area of intersection
+%
+% Same as built-in rectint, but uses less memory.
+% Also, returns area of overlap normalized by area of patch.
+% See rectintSparse
+
+% Short-circuit || (not elementwise |) is the correct operator for a
+% scalar logical test.
+if isempty(A) || isempty(B)
+  overlap = [];
+  normoverlap = [];
+  return;
+end
+
+% Convert [x y w h] to edge coordinates for the compiled loop.
+leftA = A(:,1);
+bottomA = A(:,2);
+rightA = leftA + A(:,3);
+topA = bottomA + A(:,4);
+
+leftB = B(:,1)';
+bottomB = B(:,2)';
+rightB = leftB + B(:,3)';
+topB = bottomB + B(:,4)';
+
+verbose = 0;
+% The pairwise loop lives in the compiled MEX helper rectintSparseLoopC.
+% (Removed unused locals numRectA/numRectB; the helper only needs edges.)
+[overlap, normoverlap] = rectintSparseLoopC(leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB, verbose);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,147 @@
+/* This is based on
+http://www.mathworks.com/access/helpdesk/help/techdoc/matlab_external/ch04cr12.shtml
+
+See rectintSparse.m for the matlab version of this code.
+
+*/
+
+#include <math.h> /* Needed for the ceil() prototype. */
+#include "mex.h"
+#include <stdio.h>  /* printf; header name apparently lost in extraction */
+
+/* If you are using a compiler that equates NaN to be zero, you
+ * must compile this example using the flag -DNAN_EQUALS_ZERO.
+ * For example:
+ *
+ * mex -DNAN_EQUALS_ZERO fulltosparse.c
+ *
+ * This will correctly define the IsNonZero macro for your C
+ * compiler.
+ */
+
+#if defined(NAN_EQUALS_ZERO)
+#define IsNonZero(d) ((d) != 0.0 || mxIsNaN(d))
+#else
+#define IsNonZero(d) ((d) != 0.0)
+#endif
+
+#define MAX(x,y) ((x)>(y) ? (x) : (y))
+#define MIN(x,y) ((x)<(y) ? (x) : (y))
+
+/* Sparse pairwise rectangle-intersection kernel behind rectintSparseC.m.
+ * prhs: leftA..bottomA, leftB..bottomB edge vectors plus a verbose flag.
+ * plhs[0]: sparse m x n matrix of intersection areas.
+ * plhs[1]: sparse m x n matrix of areas normalized by min(areaA, areaB).
+ * Storage starts at 1% density and is grown on demand (see below).
+ * NOTE(review): irs/jcs are int*, but mxGetIr/mxGetJc return mwIndex* on
+ * 64-bit MATLAB -- confirm against the target MEX API before rebuilding. */
+void mexFunction(
+        int nlhs, mxArray *plhs[],
+        int nrhs, const mxArray *prhs[]
+        )
+{
+  /* Declare variables. */
+  int j,k,m,n,nzmax,*irs,*jcs, *irs2, *jcs2;
+  double *overlap, *overlap2, tmp, areaA, areaB;
+  double percent_sparse;
+  double *leftA, *rightA, *topA, *bottomA;
+  double *leftB, *rightB, *topB, *bottomB;
+  double *verbose;
+
+  /* Get the size and pointers to input data. */
+  m = MAX(mxGetM(prhs[0]), mxGetN(prhs[0]));
+  n = MAX(mxGetM(prhs[4]), mxGetN(prhs[4]));
+  /* printf("A=%d, B=%d\n", m, n); */
+
+  leftA = mxGetPr(prhs[0]);
+  rightA = mxGetPr(prhs[1]);
+  topA = mxGetPr(prhs[2]);
+  bottomA = mxGetPr(prhs[3]);
+
+  leftB = mxGetPr(prhs[4]);
+  rightB = mxGetPr(prhs[5]);
+  topB = mxGetPr(prhs[6]);
+  bottomB = mxGetPr(prhs[7]);
+
+  verbose = mxGetPr(prhs[8]);
+
+  /* Allocate space for sparse matrix.
+   * NOTE: Assume at most 20% of the data is sparse. Use ceil
+   * to cause it to round up.
+   */
+
+  percent_sparse = 0.01;
+  nzmax = (int)ceil((double)m*(double)n*percent_sparse);
+
+  plhs[0] = mxCreateSparse(m,n,nzmax,0);
+  overlap = mxGetPr(plhs[0]);
+  irs = mxGetIr(plhs[0]);
+  jcs = mxGetJc(plhs[0]);
+
+  plhs[1] = mxCreateSparse(m,n,nzmax,0);
+  overlap2 = mxGetPr(plhs[1]);
+  irs2 = mxGetIr(plhs[1]);
+  jcs2 = mxGetJc(plhs[1]);
+
+
+  /* Assign nonzeros. k counts nonzeros emitted so far; jcs[j] records
+   * where column j starts, per the compressed-column sparse format. */
+  k = 0;
+  for (j = 0; (j < n); j++) {
+    int i;
+    jcs[j] = k;
+    jcs2[j] = k;
+    for (i = 0; (i < m); i++) {
+      /* Clipped width * clipped height; zero for disjoint pairs. */
+      tmp = (MAX(0, MIN(rightA[i], rightB[j]) - MAX(leftA[i], leftB[j]) )) *
+            (MAX(0, MIN(topA[i], topB[j]) - MAX(bottomA[i], bottomB[j]) ));
+
+      if (*verbose) {
+        printf("j=%d,i=%d,tmp=%5.3f\n", j,i,tmp);
+      }
+
+      if (IsNonZero(tmp)) {
+
+        /* Check to see if non-zero element will fit in
+         * allocated output array. If not, increase
+         * percent_sparse by 20%, recalculate nzmax, and augment
+         * the sparse array.
+         */
+        if (k >= nzmax) {
+          int oldnzmax = nzmax;
+          percent_sparse += 0.2;
+          nzmax = (int)ceil((double)m*(double)n*percent_sparse);
+
+          /* Make sure nzmax increases atleast by 1. */
+          if (oldnzmax == nzmax)
+            nzmax++;
+          printf("reallocating from %d to %d\n", oldnzmax, nzmax);
+
+          /* Both outputs are grown in lockstep so k stays valid for each. */
+          mxSetNzmax(plhs[0], nzmax);
+          mxSetPr(plhs[0], mxRealloc(overlap, nzmax*sizeof(double)));
+          mxSetIr(plhs[0], mxRealloc(irs, nzmax*sizeof(int)));
+          overlap = mxGetPr(plhs[0]);
+          irs = mxGetIr(plhs[0]);
+
+          mxSetNzmax(plhs[1], nzmax);
+          mxSetPr(plhs[1], mxRealloc(overlap2, nzmax*sizeof(double)));
+          mxSetIr(plhs[1], mxRealloc(irs2, nzmax*sizeof(int)));
+          overlap2 = mxGetPr(plhs[1]);
+          irs2 = mxGetIr(plhs[1]);
+        }
+
+        overlap[k] = tmp;
+        irs[k] = i;
+
+        areaA = (rightA[i]-leftA[i])*(topA[i]-bottomA[i]);
+        areaB = (rightB[j]-leftB[j])*(topB[j]-bottomB[j]);
+        overlap2[k] = MIN(tmp/areaA, tmp/areaB);
+        irs2[k] = i;
+
+        k++;
+      } /* IsNonZero */
+    } /* for i */
+  }
+  /* Terminate the column-pointer arrays with the total nonzero count. */
+  jcs[n] = k;
+  jcs2[n] = k;
+
+}
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.dll
Binary file toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.dll has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/repmatC.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/repmatC.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,149 @@
+/*
+mex -c mexutil.c
+mex repmat.c mexutil.obj
+to check for warnings:
+gcc -Wall -I/cygdrive/c/MATLAB6p1/extern/include -c repmat.c
+*/
+#include "mexutil.h"
+#include <string.h>  /* memcpy; header name apparently lost in extraction */
+
+/* repeat a block of memory rep times */
+void memrep(char *dest, size_t chunk, int rep)
+{
+#if 0
+ /* slow way */
+ int i;
+ char *p = dest;
+ for(i=1;i>1);
+#endif
+}
+
+void repmat(char *dest, const char *src, int ndim, int *destdimsize,
+ int *dimsize, const int *dims, int *rep)
+{
+ int d = ndim-1;
+ int i, chunk;
+ /* copy the first repetition into dest */
+ if(d == 0) {
+ chunk = dimsize[0];
+ memcpy(dest,src,chunk);
+ }
+ else {
+ /* recursively repeat each slice of src */
+ for(i=0;i ndimdest) ndimdest = nrep;
+ rep = mxCalloc(ndimdest, sizeof(int));
+ for(i=0;i ndimdest) ndimdest = nrep;
+ rep = mxCalloc(ndimdest, sizeof(int));
+ for(i=0;i ndim) memrep(dest,destdimsize[ndim-1],extra_rep);
+ if(mxIsComplex(srcmat)) {
+ src = (char*)mxGetPi(srcmat);
+ dest = (char*)mxGetPi(plhs[0]);
+ repmat(dest,src,ndim,destdimsize,dimsize,dims,rep);
+ if(ndimdest > ndim) memrep(dest,destdimsize[ndim-1],extra_rep);
+ }
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/repmatC.dll
Binary file toolboxes/FullBNT-1.0.7/KPMtools/repmatC.dll has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rgb2grayKPM.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rgb2grayKPM.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function g = rgb2grayKPM(rgb)
+% function g = rgb2grayKPM(rgb)
+% rgb2grayKPM Like the built-in function, but if r is already gray, does not cause an error
+
+% A grayscale input has a single colour plane; pass it through untouched
+% instead of handing it to rgb2gray (which would reject it).
+[nrows, ncols, nplanes] = size(rgb);
+if nplanes == 1
+  g = rgb;
+else
+  g = rgb2gray(rgb);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rnd_partition.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rnd_partition.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function [train, test] = rnd_partition(data, train_percent);
+% function [train, test] = rnd_partition(data, train_percent);
+%
+% data(:,i) is the i'th example
+% train_percent of these columns get put into train, the rest into test
+
+% Shuffle the column indices once, then cut the shuffled order at the
+% requested fraction (rounded up).
+ncases = size(data, 2);
+perm = randperm(ncases);
+ntrain = ceil(ncases * train_percent);
+train = data(:, perm(1:ntrain));
+test  = data(:, perm(ntrain+1:end));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/rotate_xlabel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/rotate_xlabel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function hText = rotate_xlabel(degrees, newlabels)
+% ROTATE_XLABEL Replace the x tick labels with rotated text objects.
+% hText = rotate_xlabel(degrees, newlabels)
+%
+% degrees   : rotation angle for the labels
+% newlabels : optional replacement labels. BUG FIX: the old code assigned
+%             xtl = newlabels unconditionally (its nargin guard was
+%             disabled with "if 0"), so calling with one argument crashed
+%             on an undefined variable. The assignment is now guarded.
+
+% Posted to comp.soft-sys.matlab on 2003-05-01 13:45:36 PST
+% by David Borger (borger@ix.netcom.com)
+
+xtl = get(gca,'XTickLabel');
+set(gca,'XTickLabel','');
+lxtl = length(xtl);
+if nargin > 1
+    xtl = newlabels;   % replace the old labels when supplied
+end;
+
+
+hxLabel=get(gca,'XLabel');
+xLP=get(hxLabel,'Position');
+y=xLP(2);
+XTick=get(gca,'XTick');
+y=repmat(y,length(XTick),1);
+%fs=get(gca,'fontsize');
+fs = 12;
+hText=text(XTick,y,xtl,'fontsize',fs);
+set(hText,'Rotation',degrees,'HorizontalAlignment','right');
+
+% Modifications by KPM
+
+% Pin each label just below the top of the axes at integer x positions.
+ylim = get(gca,'ylim');
+height = ylim(2)-ylim(1);
+N = length(hText);
+for i=1:N
+    voffset = ylim(2) - 0*height;
+    set(hText(i), 'position', [i voffset 0]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/safeStr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/safeStr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function s = safeStr(s)
+% Change punctuation characters to they print properly
+
+% Map backslash to forward slash and underscore to dash (presumably to
+% defuse TeX-style interpretation in labels -- the original comment only
+% says "print properly").
+s = strrep(strrep(s, '\', '/'), '_', '-');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/sampleUniformInts.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/sampleUniformInts.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function M = sampleUniformInts(N, r, c)
+
+% M is an rxc matrix of integers in 1..N
+
+% Build a uniform distribution over 1..N and draw from it.
+% normalize is a KPMtools helper; presumably normalize(ones(N,1)) is
+% ones(N,1)/N -- confirm against its definition.
+prob = normalize(ones(N,1));
+M = sample_discrete(prob, r, c);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/sample_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/sample_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function M = sample_discrete(prob, r, c)
+% SAMPLE_DISCRETE Like the built in 'rand', except we draw from a non-uniform discrete distrib.
+% M = sample_discrete(prob, r, c)
+%
+% Example: sample_discrete([0.8 0.2], 1, 10) generates a row vector of 10 random integers from {1,2},
+% where the prob. of being 1 is 0.8 and the prob of being 2 is 0.2.
+
+n = length(prob);
+
+if nargin == 1
+  r = 1; c = 1;
+elseif nargin == 2
+  % BUG FIX: this used to read "c == r", a comparison whose result was
+  % discarded, leaving c undefined for two-argument calls.
+  c = r;
+end
+
+R = rand(r, c);
+M = ones(r, c);
+cumprob = cumsum(prob(:));
+
+if n < r*c
+  % Few outcomes, many samples: vectorize over the outcomes.
+  % M counts how many cumulative thresholds each R value exceeds.
+  for i = 1:n-1
+    M = M + (R > cumprob(i));
+  end
+else
+  % loop over the smaller index - can be much faster if length(prob) >> r*c
+  cumprob2 = cumprob(1:end-1);
+  for i=1:r
+    for j=1:c
+      M(i,j) = sum(R(i,j) > cumprob2)+1;
+    end
+  end
+end
+
+
+% Slower, even though vectorized
+%cumprob = reshape(cumsum([0 prob(1:end-1)]), [1 1 n]);
+%M = sum(R(:,:,ones(n,1)) > cumprob(ones(r,1),ones(c,1),:), 3);
+
+% convert using a binning algorithm
+%M=bindex(R,cumprob);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+function set_xtick_label(tick_labels, angle, axis_label)
+% SET_XTICK_LABEL Print the xtick labels at an angle instead of horizontally
+% set_xtick_label(tick_labels, angle, axis_label)
+%
+% angle default = 90
+% axis_label default = ''
+%
+% This is derived from Solution Number: 5375 on mathworks.com
+% See set_xtick_label_demo for an example
+
+if nargin < 2, angle = 90; end
+if nargin < 3, axis_label = []; end
+
+% Reduce the size of the axis so that all the labels fit in the figure.
+pos = get(gca,'Position');
+%set(gca,'Position',[pos(1), .2, pos(3) .65])
+%set(gca,'Position',[pos(1), 0, pos(3) .45])
+%set(gca,'Position',[pos(1), 0.1, pos(3) 0.5])
+
+ax = axis;     % Current axis limits
+axis(axis);    % Fix the axis limits
+Yl = ax(3:4);  % Y-axis limits
+
+%set(gca, 'xtick', 1:length(tick_labels));
+% Ticks are placed at 0.7, 1.7, ... -- presumably to centre the rotated
+% labels under their data positions; confirm visually.
+set(gca, 'xtick', 0.7:1:length(tick_labels));
+Xt = get(gca, 'xtick');
+
+% Place the text labels at the bottom of the axes, one per tick.
+t = text(Xt,Yl(1)*ones(1,length(Xt)),tick_labels);
+set(t,'HorizontalAlignment','right','VerticalAlignment','top', 'Rotation', angle);
+
+% Remove the default labels
+set(gca,'XTickLabel','')
+
+% Get the Extent of each text object. This
+% loop is unavoidable.
+for i = 1:length(t)
+  ext(i,:) = get(t(i),'Extent');
+end
+
+% Determine the lowest point. The X-label will be
+% placed so that the top is aligned with this point.
+LowYPoint = min(ext(:,2));
+
+% Place the axis label at this point
+if ~isempty(axis_label)
+  Xl = get(gca, 'Xlim');
+  XMidPoint = Xl(1)+abs(diff(Xl))/2;
+  tl = text(XMidPoint,LowYPoint, axis_label, 'VerticalAlignment','top', ...
+      'HorizontalAlignment','center');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+
+% Demo script for set_xtick_label: plots random monthly data and labels
+% every other month with rotated tick text.
+
+% Generate some test data. Assume that the X-axis represents months.
+x = 1:12;
+y = 10*rand(1,length(x));
+
+% Plot the data.
+h = plot(x,y,'+');
+
+% Add a title.
+title('This is a title')
+
+% Set the X-Tick locations so that every other month is labeled.
+Xt = 1:2:11;
+Xl = [1 12];
+set(gca,'XTick',Xt,'XLim',Xl);
+
+% Add the months as tick labels.
+months = ['Jan';
+    'Feb';
+    'Mar';
+    'Apr';
+    'May';
+    'Jun';
+    'Jul';
+    'Aug';
+    'Sep';
+    'Oct';
+    'Nov';
+    'Dec'];
+
+set_xtick_label(months(1:2:12, :), 90, 'xaxis label');
+
+
+
+% Disabled scratch code: same demo using subsets of 1:3 as labels.
+if 0
+
+
+% Generate some test data. Assume that the X-axis represents months.
+x = 1:8;
+y = 10*rand(1,length(x));
+
+% Plot the data.
+h = plot(x,y,'+');
+
+S = subsets(1:3);
+str = cell(1,8);
+for i=1:2^3
+  str{i} = num2str(S{i});
+end
+set_xtick_label(str);
+
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/setdiag.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/setdiag.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function M = setdiag(M, v)
+% SETDIAG Set the diagonal of a matrix to a specified scalar/vector.
+% M = set_diag(M, v)
+
+n = length(M);
+if length(v)==1
+  v = repmat(v, 1, n);
+end
+
+% In column-major order the diagonal entries of an n x n matrix sit at
+% linear indices 1, n+2, 2n+3, ... i.e. 1:(n+1):n^2.
+diagNdx = 1:(n+1):n^2;
+M(diagNdx) = v;
+
+%M = triu(M,1) + tril(M,-1) + diag(v);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/softeye.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/softeye.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function M = softeye(K, p)
+% SOFTEYE Make a stochastic matrix with p on the diagonal, and the remaining mass distributed uniformly
+% M = softeye(K, p)
+%
+% M is a K x K matrix.
+
+% Start with p on the diagonal, then spread the leftover mass 1-p evenly
+% over the K-1 off-diagonal entries of each row.
+M = p*eye(K);
+offdiag = ~eye(K);
+M(offdiag) = (1-p)/(K-1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/sort_evec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/sort_evec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [evec, evals] = sort_evec(temp_evec, temp_evals, N)
+% SORT_EVEC Return the N largest eigenvalues, descending, with matching eigenvectors.
+% [evec, evals] = sort_evec(temp_evec, temp_evals, N)
+%
+% temp_evals may be a vector or a diagonal matrix (as returned by eig).
+% NOTE(review): isvectorBNT is a KPMtools helper, presumably a
+% version-portable isvector -- confirm.
+
+if ~isvectorBNT(temp_evals)
+  temp_evals = diag(temp_evals);
+end
+
+% Eigenvalues nearly always returned in descending order, but just
+% to make sure.....
+% Sorting the negated values ascending gives a descending order.
+[evals perm] = sort(-temp_evals);
+evals = -evals(1:N);
+% An 'if' over a vector comparison is true only when ALL elements match
+% (and the vector is nonempty), i.e. the originals were already sorted.
+if evals == temp_evals(1:N)
+  % Originals were in order
+  evec = temp_evec(:, 1:N);
+  return
+else
+  fprintf('sorting evec\n');
+  % Need to reorder the eigenvectors
+  for i=1:N
+    evec(:,i) = temp_evec(:,perm(i));
+  end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/splitLongSeqIntoManyShort.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/splitLongSeqIntoManyShort.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function short = splitLongSeqIntoManyShort(long, Tsmall)
+% splitLongSeqIntoManyShort Put groups of columns into a cell array of narrower matrices
+% function short = splitLongSeqIntoManyShort(long, Tsmall)
+%
+% long(:,t)
+% short{i} = long(:,ndx1:ndx2) where each segment (except maybe the last) is of length Tsmall
+
+% BUG FIX: length(long) returns max(size(long)), which is the number of
+% ROWS whenever the sequence is shorter than its feature dimension. The
+% time axis is the second dimension, so measure it explicitly.
+T = size(long, 2);
+Nsmall = ceil(T/Tsmall);
+short = cell(Nsmall,1);
+
+t = 1;
+for i=1:Nsmall
+  % Each chunk takes Tsmall columns; min() clips the final partial chunk.
+  short{i} = long(:,t:min(T,t+Tsmall-1));
+  t = t+Tsmall;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/sprintf_intvec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/sprintf_intvec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function s = sprintf_intvec(v)
+% SPRINTF_INTVEC Print a vector of ints as comma separated string, with no trailing comma
+% function s = sprintf_intvec(v)
+%
+% e.g., sprintf_intvec(1:3) returns '1,2,3'
+
+% Format every element with a trailing comma, then chop the final comma.
+withComma = sprintf('%d,', v);
+s = withComma(1:end-1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/sqdist.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/sqdist.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function m = sqdist(p, q, A)
+% SQDIST Squared Euclidean or Mahalanobis distance.
+% SQDIST(p,q) returns m(i,j) = (p(:,i) - q(:,j))'*(p(:,i) - q(:,j)).
+% SQDIST(p,q,A) returns m(i,j) = (p(:,i) - q(:,j))'*A*(p(:,i) - q(:,j)).
+
+% From Tom Minka's lightspeed toolbox
+
+[d, pn] = size(p);
+[dq, qn] = size(q);
+% The original silently overwrote d with size(q,1); fail fast instead.
+if dq ~= d
+  error('sqdist: p and q must have the same number of rows');
+end
+
+if nargin == 2
+
+  % Expand ||p_i - q_j||^2 = ||p_i||^2 + ||q_j||^2 - 2 p_i'q_j.
+  pmag = sum(p .* p, 1);
+  qmag = sum(q .* q, 1);
+  m = repmat(qmag, pn, 1) + repmat(pmag', 1, qn) - 2*p'*q;
+  %m = ones(pn,1)*qmag + pmag'*ones(1,qn) - 2*p'*q;
+
+else
+
+  % Short-circuit || (not elementwise |) for the scalar emptiness test.
+  if isempty(A) || isempty(p)
+    error('sqdist: empty matrices');
+  end
+  Ap = A*p;
+  Aq = A*q;
+  pmag = sum(p .* Ap, 1);
+  qmag = sum(q .* Aq, 1);
+  m = repmat(qmag, pn, 1) + repmat(pmag', 1, qn) - 2*p'*Aq;
+
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/strmatch_multi.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/strmatch_multi.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [posns] = strmatch_multi(keys, strs)
+% STRMATCH_MULTI Find where each key occurs in list of strings.
+% [pos] = strmatch_multi(key, strs) where key is a string and strs is a cell array of strings
+% works like the built-in command sequence pos = strmatch(key, strs, 'exact'),
+% except that pos is the first occurrence of key in strs; if there is no occurence, pos is 0.
+%
+% [posns] = strmatch_multi(keys, strs), where keys is a cell array of strings,
+% matches each element of keys. It loops over whichever is shorter, keys or strs.
+
+if ~iscell(keys), keys = {keys}; end
+nkeys = length(keys);
+posns = zeros(1, nkeys);
+if nkeys < length(strs)
+  % Few keys: scan strs once per key, recording the first hit.
+  for k=1:nkeys
+    hits = find(strcmp(keys{k}, strs));
+    if ~isempty(hits)
+      posns(k) = hits(1);
+    end
+  end
+else
+  % Few strings: stamp each string's position into its matching key slots.
+  % BUG FIX: the original let later strings overwrite earlier ones, so a
+  % duplicated string reported its LAST position, contradicting both the
+  % doc comment and the other branch. Only fill still-empty slots.
+  for s=1:length(strs)
+    matchNdx = find(strcmp(strs{s}, keys));
+    matchNdx = matchNdx(posns(matchNdx) == 0);
+    posns(matchNdx) = s;
+  end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/strmatch_substr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/strmatch_substr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function ndx = strmatch_substr(str, strs)
+% STRMATCH_SUBSTR Like strmatch, except str can match any part of strs{i}, not just prefix.
+% ndx = strmatch_substr(str, strs)
+%
+% Example:
+% i = strmatch('max', {'max','minimax','maximum'})
+% returns i = [1; 3] since only 1 and 3 begin with max, but
+% i = strmatch_substr('max', {'max','minimax','maximum'})
+% returns i = [1;2;3];
+%
+% If str is also a cell array, it is like calling strmatch_substr several times
+% and concatenating the results.
+% Example:
+%
+% i = strmatch_substr({'foo', 'dog'}, {'foo', 'hoofoo', 'dog'})
+% returns i = [1;2;3]
+
+ndx = [];
+if ~iscell(str), str = {str}; end
+for j=1:length(str)
+  for i=1:length(strs)
+    % BUG FIX: findstr is argument-order-insensitive (it locates the
+    % shorter string inside the longer), so a key LONGER than a candidate
+    % falsely matched. strfind(strs{i}, str{j}) searches for the key
+    % inside the candidate only, which is the documented contract.
+    if ~isempty(strfind(strs{i}, str{j}))
+      ndx = [ndx; i];
+    end
+  end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/strsplit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/strsplit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function parts = strsplit(splitstr, str, option)
+%STRSPLIT Split string into pieces.
+%
+% STRSPLIT(SPLITSTR, STR, OPTION) splits the string STR at every occurrence
+% of SPLITSTR and returns the result as a cell array of strings. By default,
+% SPLITSTR is not included in the output.
+%
+% STRSPLIT(SPLITSTR, STR, OPTION) can be used to control how SPLITSTR is
+% included in the output. If OPTION is 'include', SPLITSTR will be included
+% as a separate string. If OPTION is 'append', SPLITSTR will be appended to
+% each output string, as if the input string was split at the position right
+% after the occurrence SPLITSTR. If OPTION is 'omit', SPLITSTR will not be
+% included in the output.
+
+% Author: Peter J. Acklam
+% Time-stamp: 2004-09-22 08:48:01 +0200
+% E-mail: pjacklam@online.no
+% URL: http://home.online.no/~pjacklam
+
+  nargsin = nargin;
+  error(nargchk(2, 3, nargsin));
+  if nargsin < 3
+    option = 'omit';
+  else
+    option = lower(option);
+  end
+
+  splitlen = length(splitstr);
+  parts = {};
+
+  % Peel off one piece per iteration: locate the next separator, emit the
+  % text before it (plus the separator, per OPTION), then continue with
+  % the remainder of the string.
+  while 1
+
+    k = strfind(str, splitstr);
+    if isempty(k)
+      % No separator left: the rest of the string is the final piece.
+      parts{end+1} = str;
+      break
+    end
+
+    switch option
+      case 'include'
+        % Emit the piece and the separator as two separate cells.
+        parts(end+1:end+2) = {str(1:k(1)-1), splitstr};
+      case 'append'
+        % Emit the piece with the separator glued on the end.
+        parts{end+1} = str(1 : k(1)+splitlen-1);
+      case 'omit'
+        parts{end+1} = str(1 : k(1)-1);
+      otherwise
+        error(['Invalid option string -- ', option]);
+    end
+
+
+    % Drop everything up to and including the separator just handled.
+    str = str(k(1)+splitlen : end);
+
+  end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subplot2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subplot2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function subplot2(nrows, ncols, i, j)
+% function subplot2(nrows, ncols, i, j)
+% Select the subplot pane at row i, column j of an nrows x ncols grid.
+
+% subplot numbers its panes row-wise, so the pane for cell (i,j) is found
+% by treating (j,i) as subscripts into a [ncols nrows] array.
+gridShape = [nrows ncols];
+paneNum = sub2ind(gridShape(end:-1:1), j, i);
+subplot(nrows, ncols, paneNum);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subplot3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subplot3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function fignum = subplot3(nrows, ncols, fignumBase, plotnumBase)
+% function subplot3(nrows, ncols, fignumBase, plotnumBase)
+% Choose a subplot number, opening a new figure if necessary
+% eg nrows=2, ncols = 2, we plot on (fignum, plotnum) = (1,1), (1,2), (1,3), (1,4), (2,1), ...
+
+% div and wrap are KPMtools helpers: presumably integer division and a
+% 1-based modular wrap-around -- confirm against their definitions.
+nplotsPerFig = nrows*ncols;
+fignum = fignumBase + div(plotnumBase-1, nplotsPerFig);
+plotnum = wrap(plotnumBase, nplotsPerFig);
+figure(fignum);
+% Clear the figure when starting its first pane.
+if plotnum==1, clf; end
+subplot(nrows, ncols, plotnum);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subsets.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subsets.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function [T, bitv] = subsets(S, U, L, sorted, N)
+% SUBSETS Create a set of all the subsets of S which have cardinality <= U and >= L
+% T = subsets(S, U, L)
+% U defaults to length(S), L defaults to 0.
+% So subsets(S) generates the powerset of S.
+%
+% Example:
+% T = subsets(1:4, 2, 1)
+% T{:} = 1, 2, [1 2], 3, [1 3], [2 3], 4, [1 4], [2 4], [3 4]
+%
+% T = subsets(S, U, L, sorted)
+% If sorted=1, return the subsets in increasing size
+%
+% Example:
+% T = subsets(1:4, 2, 1, 1)
+% T{:} = 1, 2, 3, 4, [1 2], [1 3], [2 3], [1 4], [2 4], [3 4]
+%
+% [T, bitv] = subsets(S, U, L, sorted, N)
+% Row i of bitv is a bit vector representation of T{i},
+% where bitv has N columns (representing 1:N).
+% N defaults to max(S).
+%
+% Example:
+% [T,bitv] = subsets(2:4, 2^3, 0, 0, 5)
+% T{:} = [], 2, 3, [2 3], 4, [2 4], [3 4], [2 3 4]
+% bitv=
+% 0 0 0 0 0
+% 0 1 0 0 0
+% 0 0 1 0 0
+% 0 1 1 0 0
+% 0 0 0 1 0
+% 0 1 0 1 0
+% 0 0 1 1 0
+% 0 1 1 1 0
+
+n = length(S);
+
+if nargin < 2, U = n; end
+if nargin < 3, L = 0; end
+if nargin < 4, sorted = 0; end
+if nargin < 5, N = max(S); end
+
+% Enumerate all 2^n bit vectors: row i of 'bits' is the binary expansion
+% of i-1 (ind2subv is a KPMtools helper).
+bits = ind2subv(2*ones(1,n), 1:2^n)-1;
+% Keep only the masks whose popcount lies in [L, U].
+sm = sum(bits,2);
+masks = bits((sm <= U) & (sm >= L), :);
+m = size(masks, 1);
+T = cell(1, m);
+for i=1:m
+  s = S(find(masks(i,:)));
+  T{i} = s;
+end
+
+if sorted
+  % sortcell is a KPMtools helper; presumably orders the cell array by
+  % subset size -- confirm against its definition.
+  T = sortcell(T);
+end
+
+% Bit-vector form over the universe 1:N (columns outside S stay zero).
+bitv = zeros(m, N);
+bitv(:, S) = masks;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subsets1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subsets1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function sub_s=subsets1(s,k)
+% SUBSETS1 creates sub-sets of a specific size from a given set
+% SS = subsets1(S, k)
+%
+% S is the given set
+% k is the required sub-sets size
+%
+% Example:
+%
+% >> ss=subsets1([1:4],3);
+% >> ss{:}
+% ans =
+% 1 2 3
+% ans =
+% 1 2 4
+% ans =
+% 1 3 4
+% ans =
+% 2 3 4
+%
+% Written by Raanan Yehezkel, 2004
+
+if k<0 % special case
+    % k==0 is legal (it returns the empty subset below), so the message
+    % says "non-negative" -- the original misleadingly said "positive".
+    error('subset size must be non-negative');
+elseif k==0 % special case
+    sub_s={[]};
+else
+    l=length(s);
+    ss={};
+    if l>=k
+        if k==1 % Exit condition
+            for I=1:l
+                ss{I}=s(I);
+            end
+        else
+            % Fix s(I) as the smallest chosen member and recurse on the
+            % tail for the remaining k-1 elements; this yields subsets in
+            % lexicographic order.
+            for I=1:l
+                ss1=subsets1(s([(I+1):l]),k-1);
+                for J=1:length(ss1)
+                    ss{end+1}=[s(I),ss1{J}];
+                end
+            end
+        end
+    end
+    sub_s=ss;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subsetsFixedSize.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subsetsFixedSize.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function sub_s=subsets1(s,k)
+% SUBSETS1 creates sub-sets of a specific size from a given set
+% SS = subsets1(S, k)
+%
+% S is the given set
+% k is the required sub-sets size
+%
+% Example:
+%
+% >> ss=subsets1([1:4],3);
+% >> ss{:}
+% ans =
+% 1 2 3
+% ans =
+% 1 2 4
+% ans =
+% 1 3 4
+% ans =
+% 2 3 4
+%
+% Written by Raanan Yehezkel, 2004
+
+% NOTE(review): this file (subsetsFixedSize.m) declares function subsets1
+% and duplicates subsets1.m verbatim. MATLAB dispatches by file name, so
+% callers invoke it as subsetsFixedSize, while the internal recursive
+% calls to subsets1 resolve to subsets1.m -- confirm this is intended.
+
+if k<0 % special case
+    error('subset size must be positive');
+elseif k==0 % special case
+    sub_s={[]};
+else
+    l=length(s);
+    ss={};
+    if l>=k
+        if k==1 % Exit condition
+            for I=1:l
+                ss{I}=s(I);
+            end
+        else
+            % Fix s(I) as the smallest member, recurse on the tail for
+            % the remaining k-1 elements.
+            for I=1:l
+                ss1=subsets1(s([(I+1):l]),k-1);
+                for J=1:length(ss1)
+                    ss{end+1}=[s(I),ss1{J}];
+                end
+            end
+        end
+    end
+    sub_s=ss;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subv2ind.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subv2ind.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function index = subv2ind(siz,sub)
+%SUBV2IND Linear index from subscript vector.
+% SUBV2IND(SIZ,SUB) returns an equivalent single index corresponding to a
+% subscript vector for an array of size SIZ.
+% If SUB is a matrix, with subscript vectors as rows, then the result is a
+% column vector.
+%
+% This is the opposite of IND2SUBV, so that
+% SUBV2IND(SIZ,IND2SUBV(SIZ,IND)) == IND.
+%
+% See also IND2SUBV, SUB2IND.
+
+% Column-major strides: dimension d advances the linear index by
+% prod(siz(1:d-1)).
+strides = [1 cumprod(siz(1:end-1))];
+% index = 1 + sum_d (sub(:,d)-1)*strides(d), rearranged so the "-1"
+% correction is applied once as the scalar sum(strides).
+index = sub*strides' - sum(strides) + 1;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/subv2indKPM.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/subv2indKPM.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,89 @@
+/* C mex version of subv2ind*/
+/* 2 inputs, 1 output */
+/* siz, subv */
+/* ndx */
+#include "mex.h"
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
+ int i, j, k, nCol, nRow, binary, temp;
+ double *pSize, *pSubv, *pr;
+ int *cumprod;
+
+ pSize = mxGetPr(prhs[0]);
+ pSubv = mxGetPr(prhs[1]);
+ nCol = mxGetNumberOfElements(prhs[0]);
+ nRow = mxGetM(prhs[1]);
+
+
+ if(mxIsEmpty(prhs[1])){
+ plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL);
+ return;
+ }
+
+ if(mxIsEmpty(prhs[0])){
+ plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
+ *mxGetPr(plhs[0]) = 1;
+ return;
+ }
+
+ binary = 2;
+ for (i = 0; i < nCol; i++){
+ if (pSize[i] > 2.0){
+ binary = 0;
+ break;
+ }
+ else if(pSize[i] == 1.0){
+ binary = 1;
+ }
+ }
+
+ plhs[0] = mxCreateDoubleMatrix(nRow, 1, mxREAL);
+ pr = mxGetPr(plhs[0]);
+ for(i=0; i max_y), max_y=pos(4)+pos(2)+ff/5*2;end;
+ else,
+ oldtitle = h(i);
+ end
+end
+
+if max_y > plotregion,
+ scale = (plotregion-min_y)/(max_y-min_y);
+ for i=1:length(h),
+ pos = get(h(i),'position');
+ pos(2) = (pos(2)-min_y)*scale+min_y;
+ pos(4) = pos(4)*scale-(1-scale)*ff/5*3;
+ set(h(i),'position',pos);
+ end
+end
+
+np = get(gcf,'nextplot');
+set(gcf,'nextplot','add');
+if (oldtitle),
+ delete(oldtitle);
+end
+ha=axes('pos',[0 1 1 1],'visible','off','Tag','suptitle');
+ht=text(.5,titleypos-1,str);set(ht,'horizontalalignment','center','fontsize',fs);
+set(gcf,'nextplot',np);
+axes(haold);
+if nargout,
+ hout=ht;
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/unaryEncoding.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/unaryEncoding.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function U = unaryEncoding(data, K)
+% unaryEncoding Encode data(s) as a 1-of-K column vector
+% function U = unaryEncoding(data, K)
+%
+% eg.
+% If data = [3 2 2] and K=3,
+% then U = [0 0 0
+% 0 1 1
+% 1 0 0]
+
+if nargin < 2, K = max(data); end
+N = length(data);
+U = zeros(K,N);
+ndx = subv2ind([K N], [data(:)'; 1:N]');
+U(ndx) = 1;
+U = reshape(U, [K N]);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/wrap.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/wrap.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function v = wrap(u,N)
+% WRAP Wrap a vector of indices around a torus.
+% v = wrap(u,N)
+%
+% e.g., wrap([-1 0 1 2 3 4], 3) = 2 3 1 2 3 1
+
+v = mod(u-1,N)+1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/xticklabel_rotate90.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/xticklabel_rotate90.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+function xticklabel_rotate90(XTick,varargin)
+%XTICKLABEL_ROTATE90 - Rotate numeric Xtick labels by 90 degrees
+%
+% Syntax: xticklabel_rotate90(XTick)
+%
+% Input: XTick - vector array of XTick positions & values (numeric)
+%
+% Output: none
+%
+% Example 1: Set the positions of the XTicks and rotate them
+% figure; plot([1960:2004],randn(45,1)); xlim([1960 2004]);
+% xticklabel_rotate90([1960:2:2004]);
+% %If you wish, you may set a few text "Property-value" pairs
+% xticklabel_rotate90([1960:2:2004],'Color','m','Fontweight','bold');
+%
+% Example 2: %Rotate XTickLabels at their current position
+% XTick = get(gca,'XTick');
+% xticklabel_rotate90(XTick);
+%
+% Other m-files required: none
+% Subfunctions: none
+% MAT-files required: none
+%
+% See also: TEXT, SET
+
+% Author: Denis Gilbert, Ph.D., physical oceanography
+% Maurice Lamontagne Institute, Dept. of Fisheries and Oceans Canada
+% email: gilbertd@dfo-mpo.gc.ca Web: http://www.qc.dfo-mpo.gc.ca/iml/
+% February 1998; Last revision: 24-Mar-2003
+
+if ~isnumeric(XTick)
+ error('XTICKLABEL_ROTATE90 requires a numeric input argument');
+end
+
+%Make sure XTick is a column vector
+XTick = XTick(:);
+
+%Set the Xtick locations and set XTicklabel to an empty string
+set(gca,'XTick',XTick,'XTickLabel','')
+
+% Define the xtickLabels
+xTickLabels = num2str(XTick);
+
+% Determine the location of the labels based on the position
+% of the xlabel
+hxLabel = get(gca,'XLabel'); % Handle to xlabel
+xLabelString = get(hxLabel,'String');
+
+if ~isempty(xLabelString)
+ warning('You may need to manually reset the XLABEL vertical position')
+end
+
+set(hxLabel,'Units','data');
+xLabelPosition = get(hxLabel,'Position');
+y = xLabelPosition(2);
+
+%CODE below was modified following suggestions from Urs Schwarz
+y=repmat(y,size(XTick,1),1);
+% retrieve current axis' fontsize
+fs = get(gca,'fontsize');
+
+% Place the new xTickLabels by creating TEXT objects
+hText = text(XTick, y, xTickLabels,'fontsize',fs);
+
+% Rotate the text objects by 90 degrees
+set(hText,'Rotation',90,'HorizontalAlignment','right',varargin{:})
+
+%------------- END OF CODE --------------
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/zipload.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/zipload.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+%ZIPLOAD Load compressed data file created with ZIPSAVE
+%
+% [data] = zipload( filename )
+% filename: string variable that contains the name of the
+% compressed file (do not include '.zip' extension)
+% Use only with files created with 'zipsave'
+% pkzip25.exe has to be in the matlab path. This file is a compression utility
+% made by Pkware, Inc. It can be downloaded from: http://www.pkware.com
+% Or directly from ftp://ftp.pkware.com/pk250c32.exe, for the Windows 95/NT version.
+% This function was tested using 'PKZIP 2.50 Command Line for Windows 9x/NT'
+% It is important to use version 2.5 of the utility. Otherwise the command line below
+% has to be changed to include the proper options of the compression utility you
+% wish to use.
+% This function was tested in MATLAB Version 5.3 under Windows NT.
+% Fernando A. Brucher - May/25/1999
+%
+% Example:
+% [loadedData] = zipload('testfile');
+%--------------------------------------------------------------------
+
+function [data] = zipload( filename )
+
+%--- Decompress data file by calling pkzip (command-line command) ---
+% Options used:
+% 'extract' = decompress file
+% 'silent' = no console output
+% 'over=all' = overwrite files
+
+%eval( ['!pkzip25 -extract -silent -over=all ', filename, '.zip'] )
+eval( ['!pkzip25 -extract -silent -over=all ', filename, '.zip'] )
+
+
+%--- Load data from decompressed file ---
+% try, catch takes care of cases when pkzip fails to decompress a
+% valid matlab format file
+
+try
+ tmpStruc = load( filename );
+ data = tmpStruc.data;
+catch, return, end
+
+
+%--- Delete decompressed file ---
+
+delete( [filename,'.mat'] )
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/KPMtools/zipsave.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/zipsave.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+%ZIPSAVE Save data in compressed format
+%
+% zipsave( filename, data )
+% filename: string variable that contains the name of the resulting
+% compressed file (do not include '.zip' extension)
+% pkzip25.exe has to be in the matlab path. This file is a compression utility
+% made by Pkware, Inc. It can be downloaded from: http://www.pkware.com
+% This function was tested using 'PKZIP 2.50 Command Line for Windows 9x/NT'
+% It is important to use version 2.5 of the utility. Otherwise the command line below
+% has to be changed to include the proper options of the compression utility you
+% wish to use.
+% This function was tested in MATLAB Version 5.3 under Windows NT.
+% Fernando A. Brucher - May/25/1999
+%
+% Example:
+% testData = [1 2 3; 4 5 6; 7 8 9];
+% zipsave('testfile', testData);
+%
+% Modified by Kevin Murphy, 26 Feb 2004, to use winzip
+%------------------------------------------------------------------------
+
+function zipsave( filename, data )
+
+%--- Save data in a temporary file in matlab format (.mat)---
+
+eval( ['save ''', filename, ''' data'] )
+
+
+%--- Compress data by calling pkzip (command-line command) ---
+% Options used:
+% 'add' = add compressed files to the resulting zip file
+% 'silent' = no console output
+% 'over=all' = overwrite files
+
+%eval( ['!pkzip25 -silent -add -over=all ', filename, '.zip ', filename,'.mat'] )
+eval( ['!zip ', filename, '.zip ', filename,'.mat'] )
+
+%--- Delete temporary matlab format file ---
+
+delete( [filename,'.mat'] )
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/AR_to_SS.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/AR_to_SS.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function [F,H,Q,R,initx, initV] = AR_to_SS(coef, C, y)
+%
+% Convert a vector auto-regressive model of order k to state-space form.
+% [F,H,Q,R] = AR_to_SS(coef, C, y)
+%
+% X(i) = A(1) X(i-1) + ... + A(k) X(i-k+1) + v, where v ~ N(0, C)
+% and A(i) = coef(:,:,i) is the weight matrix for i steps ago.
+% We initialize the state vector with [y(:,k)' ... y(:,1)']', since
+% the state vector stores [X(i) ... X(i-k+1)]' in order.
+
+[s s2 k] = size(coef); % s is the size of the state vector
+bs = s * ones(1,k); % size of each block
+
+F = zeros(s*k);
+for i=1:k
+ F(block(1,bs), block(i,bs)) = coef(:,:,i);
+end
+for i=1:k-1
+ F(block(i+1,bs), block(i,bs)) = eye(s);
+end
+
+H = zeros(1*s, k*s);
+% we get to see the most recent component of the state vector
+H(block(1,bs), block(1,bs)) = eye(s);
+%for i=1:k
+% H(block(1,bs), block(i,bs)) = eye(s);
+%end
+
+Q = zeros(k*s);
+Q(block(1,bs), block(1,bs)) = C;
+
+R = zeros(s);
+
+initx = zeros(k*s, 1);
+for i=1:k
+ initx(block(i,bs)) = y(:, k-i+1); % concatenate the first k observation vectors
+end
+
+initV = zeros(k*s); % no uncertainty about the state (since perfectly observable)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+/AR_to_SS.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/README.txt/1.1.1.1/Mon Jun 7 14:39:28 2004//
+/SS_to_AR.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/convert_to_lagged_form.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/ensure_AR.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/eval_AR_perf.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/kalman_filter.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/kalman_forward_backward.m/1.1.1.1/Sat Nov 2 00:32:36 2002//
+/kalman_smoother.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/kalman_update.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_AR.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_AR_diagonal.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_kalman.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learning_demo.m/1.1.1.1/Wed Oct 23 15:17:42 2002//
+/sample_lds.m/1.1.1.1/Fri Jan 24 19:36:02 2003//
+/smooth_update.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/testKalman.m/1.1.1.1/Thu Jun 9 01:56:34 2005//
+/tracking_demo.m/1.1.1.1/Sat Jan 18 22:49:22 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/Kalman
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+Kalman filter toolbox written by Kevin Murphy, 1998.
+See http://www.ai.mit.edu/~murphyk/Software/kalman.html for details.
+
+Installation
+------------
+
+1. Install KPMtools from http://www.ai.mit.edu/~murphyk/Software/KPMtools.html
+2. Assuming you installed all these files in your matlab directory, In Matlab type
+
+addpath matlab/KPMtools
+addpath matlab/Kalman
+
+
+Demos
+-----
+See tracking_demo.m for a demo of 2D tracking.
+See learning_demo.m for a demo of parameter estimation using EM.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/SS_to_AR.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/SS_to_AR.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [coef, C] = SS_to_AR(F, Q, k, diagonal)
+%
+% Extract the parameters of a vector autoregressive process of order k from the state-space form.
+% [coef, C] = SS_to_AR(F, Q, k, diagonal)
+
+if nargin<4, diagonal = 0; end
+
+s = length(Q) / k;
+bs = s*ones(1,k);
+coef = zeros(s,s,k);
+for i=1:k
+ if diagonal
+ coef(:,:,i) = diag(diag(F(block(1,bs), block(i,bs))));
+ else
+ coef(:,:,i) = F(block(1,bs), block(i,bs));
+ end
+end
+C = Q(block(1,bs), block(1,bs));
+if diagonal
+ C = diag(diag(C));
+end
+%C = sqrt(Q(block(1,bs), block(1,bs))); % since cov(1,1) of full vector = C C'
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/convert_to_lagged_form.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/convert_to_lagged_form.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function yy = convert_to_lagged_form(y, k)
+% Create an observation vector yy(:,t) containing the last k values of y, newest first
+% e.g., k=2, y = (a1 a2 a3) yy = a2 a3
+% (b1 b2 b3) b2 b3
+% a1 a2
+% b1 b2
+
+[s T] = size(y);
+bs = s*ones(1,k);
+yy = zeros(k*s, T-k+1);
+for i=1:k
+ yy(block(i,bs), :) = y(:, k-i+1:end-i+1);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/ensure_AR.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/ensure_AR.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function [A, C, Q, R, initx, initV] = ensure_AR(A, C, Q, R, initx, initV, k, obs, diagonal)
+%
+% Ensure that the system matrices have the right form for an autoregressive process.
+
+ss = length(A);
+if nargin<8, obs=ones(ss, 1); end
+if nargin<9, diagonal=0; end
+
+[coef, C] = SS_to_AR(A, Q, k, diagonal);
+[A, C, Q, R, initx, initV] = AR_to_SS(coef, C, obs);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/eval_AR_perf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/eval_AR_perf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function [ypred, ll, mse] = eval_AR_perf(coef, C, y, model)
+% Evaluate the performance of an AR model.
+%
+% Inputs
+% coef(:,:,k,m) - coef. matrix to use for k steps back, model m
+% C(:,:,m) - cov. matrix for model m
+% y(:,t) - observation at time t
+% model(t) - which model to use at time t (defaults to 1 if not specified)
+%
+% Outputs
+% ypred(:,t) - the predicted value of y at t based on the evidence thru t-1.
+% ll - log likelihood
+% mse - mean squared error = sum_t d_t . d_t, where d_t = pred(y_t) - y(t)
+
+[s T] = size(y);
+k = size(coef, 3);
+M = size(coef, 4);
+
+if nargin<4, model = ones(1, T); end
+
+ypred = zeros(s, T);
+ypred(:, 1:k) = y(:, 1:k);
+mse = 0;
+ll = 0;
+for j=1:M
+ c(j) = log(normal_coef(C(:,:,j)));
+ invC(:,:,j) = inv(C(:,:,j));
+end
+coef = reshape(coef, [s s*k M]);
+
+for t=k+1:T
+ m = model(t-k);
+ past = y(:,t-1:-1:t-k);
+ ypred(:,t) = coef(:, :, m) * past(:);
+ d = ypred(:,t) - y(:,t);
+ mse = mse + d' * d;
+ ll = ll + c(m) - 0.5*(d' * invC(:,:,m) * d);
+end
+mse = mse / (T-k+1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/kalman_filter.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_filter.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,101 @@
+function [x, V, VV, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, varargin)
+% Kalman filter.
+% [x, V, VV, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, ...)
+%
+% INPUTS:
+% y(:,t) - the observation at time t
+% A - the system matrix
+% C - the observation matrix
+% Q - the system covariance
+% R - the observation covariance
+% init_x - the initial state (column) vector
+% init_V - the initial state covariance
+%
+% OPTIONAL INPUTS (string/value pairs [default in brackets])
+% 'model' - model(t)=m means use params from model m at time t [ones(1,T) ]
+% In this case, all the above matrices take an additional final dimension,
+% i.e., A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m).
+% However, init_x and init_V are independent of model(1).
+% 'u' - u(:,t) the control signal at time t [ [] ]
+% 'B' - B(:,:,m) the input regression matrix for model m
+%
+% OUTPUTS (where X is the hidden state being estimated)
+% x(:,t) = E[X(:,t) | y(:,1:t)]
+% V(:,:,t) = Cov[X(:,t) | y(:,1:t)]
+% VV(:,:,t) = Cov[X(:,t), X(:,t-1) | y(:,1:t)] t >= 2
+% loglik = sum{t=1}^T log P(y(:,t))
+%
+% If an input signal is specified, we also condition on it:
+% e.g., x(:,t) = E[X(:,t) | y(:,1:t), u(:, 1:t)]
+% If a model sequence is specified, we also condition on it:
+% e.g., x(:,t) = E[X(:,t) | y(:,1:t), u(:, 1:t), m(1:t)]
+
+[os T] = size(y);
+ss = size(A,1); % size of state space
+
+% set default params
+model = ones(1,T);
+u = [];
+B = [];
+ndx = [];
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i}
+ case 'model', model = args{i+1};
+ case 'u', u = args{i+1};
+ case 'B', B = args{i+1};
+ case 'ndx', ndx = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+x = zeros(ss, T);
+V = zeros(ss, ss, T);
+VV = zeros(ss, ss, T);
+
+loglik = 0;
+for t=1:T
+ m = model(t);
+ if t==1
+ %prevx = init_x(:,m);
+ %prevV = init_V(:,:,m);
+ prevx = init_x;
+ prevV = init_V;
+ initial = 1;
+ else
+ prevx = x(:,t-1);
+ prevV = V(:,:,t-1);
+ initial = 0;
+ end
+ if isempty(u)
+ [x(:,t), V(:,:,t), LL, VV(:,:,t)] = ...
+ kalman_update(A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m), y(:,t), prevx, prevV, 'initial', initial);
+ else
+ if isempty(ndx)
+ [x(:,t), V(:,:,t), LL, VV(:,:,t)] = ...
+ kalman_update(A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m), y(:,t), prevx, prevV, ...
+ 'initial', initial, 'u', u(:,t), 'B', B(:,:,m));
+ else
+ i = ndx{t};
+ % copy over all elements; only some will get updated
+ x(:,t) = prevx;
+ prevP = inv(prevV);
+ prevPsmall = prevP(i,i);
+ prevVsmall = inv(prevPsmall);
+ [x(i,t), smallV, LL, VV(i,i,t)] = ...
+ kalman_update(A(i,i,m), C(:,i,m), Q(i,i,m), R(:,:,m), y(:,t), prevx(i), prevVsmall, ...
+ 'initial', initial, 'u', u(:,t), 'B', B(i,:,m));
+ smallP = inv(smallV);
+ prevP(i,i) = smallP;
+ V(:,:,t) = inv(prevP);
+ end
+ end
+ loglik = loglik + LL;
+end
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/kalman_forward_backward.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_forward_backward.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,97 @@
+% KALMAN_FORWARD_BACKWARD Forward Backward Propagation in Information Form
+%
+%
+% Note :
+%
+% M file accompanying my technical note
+%
+% A Technique for Painless Derivation of Kalman Filtering Recursions
+%
+% available from http://www.mbfys.kun.nl/~cemgil/papers/painless-kalman.ps
+%
+
+% Uses :
+
+% Change History :
+% Date Time Prog Note
+% 07-Jun-2001 2:24 PM ATC Created under MATLAB 5.3.1.29215a (R11.1)
+
+% ATC = Ali Taylan Cemgil,
+% SNN - University of Nijmegen, Department of Medical Physics and Biophysics
+% e-mail : cemgil@mbfys.kun.nl
+
+A = [1 1;0 1];
+C = [1 0];
+Q = eye(2)*0.01^2;
+R = 0.001^2;
+mu1 = [0;1];
+P1 = 3*Q;
+
+inv_Q = inv(Q);
+inv_R = inv(R);
+
+y = [0 1.1 2 2.95 3.78];
+
+T = length(y);
+L = size(Q,1);
+
+%%%%% Forward message Passing
+h_f = zeros(L, T);
+K_f = zeros(L, L, T);
+g_f = zeros(1, T);
+h_f_pre = zeros(L, T);
+K_f_pre = zeros(L, L, T);
+g_f_pre = zeros(1, T);
+
+
+K_f_pre(:, :, 1) = inv(P1);
+h_f_pre(:,1) = K_f_pre(:, :, 1)*mu1;
+g_f_pre(1) = -0.5*log(det(2*pi*P1)) - 0.5*mu1'*inv(P1)*mu1;
+
+for i=1:T,
+ h_f(:,i) = h_f_pre(:,i) + C'*inv_R*y(:,i);
+ K_f(:,:,i) = K_f_pre(:,:,i) + C'*inv_R*C;
+ g_f(i) = g_f_pre(i) -0.5*log(det(2*pi*R)) - 0.5*y(:,i)'*inv_R*y(:,i);
+ if i1,
+ M = inv(inv_Q + K_b(:,:,i));
+ h_b_post(:,i-1) = A'*inv(Q)*M*h_b(:,i);
+ K_b_post(:,:,i-1) = A'*inv_Q*(Q - M)*inv_Q*A;
+ g_b_post(i-1) = g_b(i) -0.5*log(det(2*pi*Q)) + 0.5*log(det(2*pi*M)) + 0.5*h_b(:,i)'*M*h_b(:,i);
+ end;
+end;
+
+
+%%%% Smoothed Estimates
+
+mu = zeros(size(h_f));
+Sig = zeros(size(K_f));
+g = zeros(size(g_f));
+lalpha = zeros(size(g_f));
+
+for i=1:T,
+ Sig(:,:,i) = inv(K_b_post(:,:,i) + K_f(:,:,i));
+ mu(:,i) = Sig(:,:,i)*(h_b_post(:,i) + h_f(:,i));
+ g(i) = g_b_post(i) + g_f(:,i);
+ lalpha(i) = g(i) + 0.5*log(det(2*pi*Sig(:,:,i))) + 0.5*mu(:,i)'*inv(Sig(:,:,i))*mu(:,i);
+end;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/kalman_smoother.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_smoother.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function [xsmooth, Vsmooth, VVsmooth, loglik] = kalman_smoother(y, A, C, Q, R, init_x, init_V, varargin)
+% Kalman/RTS smoother.
+% [xsmooth, Vsmooth, VVsmooth, loglik] = kalman_smoother(y, A, C, Q, R, init_x, init_V, ...)
+%
+% The inputs are the same as for kalman_filter.
+% The outputs are almost the same, except we condition on y(:, 1:T) (and u(:, 1:T) if specified),
+% instead of on y(:, 1:t).
+
+[os T] = size(y);
+ss = length(A);
+
+% set default params
+model = ones(1,T);
+u = [];
+B = [];
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i}
+ case 'model', model = args{i+1};
+ case 'u', u = args{i+1};
+ case 'B', B = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+xsmooth = zeros(ss, T);
+Vsmooth = zeros(ss, ss, T);
+VVsmooth = zeros(ss, ss, T);
+
+% Forward pass
+[xfilt, Vfilt, VVfilt, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, ...
+ 'model', model, 'u', u, 'B', B);
+
+% Backward pass
+xsmooth(:,T) = xfilt(:,T);
+Vsmooth(:,:,T) = Vfilt(:,:,T);
+%VVsmooth(:,:,T) = VVfilt(:,:,T);
+
+for t=T-1:-1:1
+ m = model(t+1);
+ if isempty(B)
+ [xsmooth(:,t), Vsmooth(:,:,t), VVsmooth(:,:,t+1)] = ...
+ smooth_update(xsmooth(:,t+1), Vsmooth(:,:,t+1), xfilt(:,t), Vfilt(:,:,t), ...
+ Vfilt(:,:,t+1), VVfilt(:,:,t+1), A(:,:,m), Q(:,:,m), [], []);
+ else
+ [xsmooth(:,t), Vsmooth(:,:,t), VVsmooth(:,:,t+1)] = ...
+ smooth_update(xsmooth(:,t+1), Vsmooth(:,:,t+1), xfilt(:,t), Vfilt(:,:,t), ...
+ Vfilt(:,:,t+1), VVfilt(:,:,t+1), A(:,:,m), Q(:,:,m), B(:,:,m), u(:,t+1));
+ end
+end
+
+VVsmooth(:,:,1) = zeros(ss,ss);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/kalman_update.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_update.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+function [xnew, Vnew, loglik, VVnew] = kalman_update(A, C, Q, R, y, x, V, varargin)
+% KALMAN_UPDATE Do a one step update of the Kalman filter
+% [xnew, Vnew, loglik] = kalman_update(A, C, Q, R, y, x, V, ...)
+%
+% INPUTS:
+% A - the system matrix
+% C - the observation matrix
+% Q - the system covariance
+% R - the observation covariance
+% y(:) - the observation at time t
+% x(:) - E[X | y(:, 1:t-1)] prior mean
+% V(:,:) - Cov[X | y(:, 1:t-1)] prior covariance
+%
+% OPTIONAL INPUTS (string/value pairs [default in brackets])
+% 'initial' - 1 means x and V are taken as initial conditions (so A and Q are ignored) [0]
+% 'u' - u(:) the control signal at time t [ [] ]
+% 'B' - the input regression matrix
+%
+% OUTPUTS (where X is the hidden state being estimated)
+% xnew(:) = E[ X | y(:, 1:t) ]
+% Vnew(:,:) = Var[ X(t) | y(:, 1:t) ]
+% VVnew(:,:) = Cov[ X(t), X(t-1) | y(:, 1:t) ]
+% loglik = log P(y(:,t) | y(:,1:t-1)) log-likelihood of innovation
+
+% set default params
+u = [];
+B = [];
+initial = 0;
+
+args = varargin;
+for i=1:2:length(args)
+ switch args{i}
+ case 'u', u = args{i+1};
+ case 'B', B = args{i+1};
+ case 'initial', initial = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+% xpred(:) = E[X_t+1 | y(:, 1:t)]
+% Vpred(:,:) = Cov[X_t+1 | y(:, 1:t)]
+
+if initial
+ if isempty(u)
+ xpred = x;
+ else
+ xpred = x + B*u;
+ end
+ Vpred = V;
+else
+ if isempty(u)
+ xpred = A*x;
+ else
+ xpred = A*x + B*u;
+ end
+ Vpred = A*V*A' + Q;
+end
+
+e = y - C*xpred; % error (innovation)
+n = length(e);
+ss = length(A);
+S = C*Vpred*C' + R;
+Sinv = inv(S);
+ss = length(V);
+loglik = gaussian_prob(e, zeros(1,length(e)), S, 1);
+K = Vpred*C'*Sinv; % Kalman gain matrix
+% If there is no observation vector, set K = zeros(ss).
+xnew = xpred + K*e;
+Vnew = (eye(ss) - K*C)*Vpred;
+VVnew = (eye(ss) - K*C)*A*V;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/learn_AR.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/learn_AR.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [coef, C] = learn_AR(data, k)
+% Find the ML parameters of a vector autoregressive process of order k.
+% [coef, C] = learn_AR(data, k)
+% data{l}(:,t) = the observations at time t in sequence l
+
+warning('learn_AR seems to be broken');
+
+nex = length(data);
+obs = cell(1, nex);
+for l=1:nex
+ obs{l} = convert_to_lagged_form(data{l}, k);
+end
+
+% The initial parameter values don't matter, since this is a perfectly observable problem.
+% However, the size of F must be set correctly.
+y = data{1};
+[s T] = size(y);
+coef = rand(s,s,k);
+C = rand_psd(s);
+[F,H,Q,R,initx,initV] = AR_to_SS(coef, C, y);
+
+max_iter = 1;
+fully_observed = 1;
+diagQ = 0;
+diagR = 0;
+[F, H, Q, R, initx, initV, loglik] = ...
+ learn_kalman(obs, F, H, Q, R, initx, initV, max_iter, diagQ, diagR, fully_observed);
+
+[coef, C] = SS_to_AR(F, Q, k);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/learn_AR_diagonal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/learn_AR_diagonal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function [coef, C] = learn_AR_diagonal(y, k)
+% Find the ML parameters for a collection of independent scalar AR processes.
+
+% sep_coef(1,1,t,i) is the coefficient to apply to component i of the state vector t steps ago
+% eg. consider two components L and R and let A = coef(:,:,1,:), B = coef(:,:,2,:)
+% L3 (AL 0 BL 0) (L2) (CL 0 0 0)
+% R3 = (0 AR 0 BR) (R2) (0 CR 0 0)
+% L2 (1 0 0 0 ) (L1) + (0 0 0 0)
+% R2 (0 1 0 0 ) (R1) (0 0 0 0)
+
+ss = size(y, 1);
+sep_coef = zeros(1,1,k,ss);
+for i=1:ss
+ [sep_coef(:,:,:,i), sep_cov(i)] = learn_AR(k, y(i,:));
+end
+C = diag(sep_cov);
+for t=1:k
+ x = sep_coef(1,1,t,:);
+ coef(:,:,t) = diag(x(:));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/learn_kalman.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/learn_kalman.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,182 @@
+function [A, C, Q, R, initx, initV, LL] = ...
+ learn_kalman(data, A, C, Q, R, initx, initV, max_iter, diagQ, diagR, ARmode, constr_fun, varargin)
+% LEARN_KALMAN Find the ML parameters of a stochastic Linear Dynamical System using EM.
+%
+% [A, C, Q, R, INITX, INITV, LL] = LEARN_KALMAN(DATA, A0, C0, Q0, R0, INITX0, INITV0) fits
+% the parameters which are defined as follows
+% x(t+1) = A*x(t) + w(t), w ~ N(0, Q), x(0) ~ N(init_x, init_V)
+% y(t) = C*x(t) + v(t), v ~ N(0, R)
+% A0 is the initial value, A is the final value, etc.
+% DATA(:,t,l) is the observation vector at time t for sequence l. If the sequences are of
+% different lengths, you can pass in a cell array, so DATA{l} is an O*T matrix.
+% LL is the "learning curve": a vector of the log lik. values at each iteration.
+% LL might go positive, since prob. densities can exceed 1, although this probably
+% indicates that something has gone wrong e.g., a variance has collapsed to 0.
+%
+% There are several optional arguments, that should be passed in the following order.
+% LEARN_KALMAN(DATA, A0, C0, Q0, R0, INITX0, INITV0, MAX_ITER, DIAGQ, DIAGR, ARmode)
+% MAX_ITER specifies the maximum number of EM iterations (default 10).
+% DIAGQ=1 specifies that the Q matrix should be diagonal. (Default 0).
+% DIAGR=1 specifies that the R matrix should also be diagonal. (Default 0).
+% ARMODE=1 specifies that C=I, R=0. i.e., a Gauss-Markov process. (Default 0).
+% This problem has a global MLE. Hence the initial parameter values are not important.
+%
+% LEARN_KALMAN(DATA, A0, C0, Q0, R0, INITX0, INITV0, MAX_ITER, DIAGQ, DIAGR, F, P1, P2, ...)
+% calls [A,C,Q,R,initx,initV] = f(A,C,Q,R,initx,initV,P1,P2,...) after every M step. f can be
+% used to enforce any constraints on the params.
+%
+% For details, see
+% - Ghahramani and Hinton, "Parameter Estimation for LDS", U. Toronto tech. report, 1996
+% - Digalakis, Rohlicek and Ostendorf, "ML Estimation of a stochastic linear system with the EM
+% algorithm and its application to speech recognition",
+% IEEE Trans. Speech and Audio Proc., 1(4):431--442, 1993.
+
+
+% learn_kalman(data, A, C, Q, R, initx, initV, max_iter, diagQ, diagR, ARmode, constr_fun, varargin)
+if nargin < 8, max_iter = 10; end
+if nargin < 9, diagQ = 0; end
+if nargin < 10, diagR = 0; end
+if nargin < 11, ARmode = 0; end
+if nargin < 12, constr_fun = []; end
+verbose = 1;
+thresh = 1e-4;
+
+
+if ~iscell(data)
+ N = size(data, 3);
+ data = num2cell(data, [1 2]); % each elt of the 3rd dim gets its own cell
+else
+ N = length(data);
+end
+
+N = length(data);
+ss = size(A, 1);
+os = size(C,1);
+
+alpha = zeros(os, os);
+Tsum = 0;
+for ex = 1:N
+ %y = data(:,:,ex);
+ y = data{ex};
+ T = length(y);
+ Tsum = Tsum + T;
+ alpha_temp = zeros(os, os);
+ for t=1:T
+ alpha_temp = alpha_temp + y(:,t)*y(:,t)';
+ end
+ alpha = alpha + alpha_temp;
+end
+
+previous_loglik = -inf;
+loglik = 0;
+converged = 0;
+num_iter = 1;
+LL = [];
+
+% Convert to inline function as needed.
+if ~isempty(constr_fun)
+ constr_fun = fcnchk(constr_fun,length(varargin));
+end
+
+
+while ~converged & (num_iter <= max_iter)
+
+ %%% E step
+
+ delta = zeros(os, ss);
+ gamma = zeros(ss, ss);
+ gamma1 = zeros(ss, ss);
+ gamma2 = zeros(ss, ss);
+ beta = zeros(ss, ss);
+ P1sum = zeros(ss, ss);
+ x1sum = zeros(ss, 1);
+ loglik = 0;
+
+ for ex = 1:N
+ y = data{ex};
+ T = length(y);
+ [beta_t, gamma_t, delta_t, gamma1_t, gamma2_t, x1, V1, loglik_t] = ...
+ Estep(y, A, C, Q, R, initx, initV, ARmode);
+ beta = beta + beta_t;
+ gamma = gamma + gamma_t;
+ delta = delta + delta_t;
+ gamma1 = gamma1 + gamma1_t;
+ gamma2 = gamma2 + gamma2_t;
+ P1sum = P1sum + V1 + x1*x1';
+ x1sum = x1sum + x1;
+ %fprintf(1, 'example %d, ll/T %5.3f\n', ex, loglik_t/T);
+ loglik = loglik + loglik_t;
+ end
+ LL = [LL loglik];
+ if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end
+ %fprintf(1, 'iteration %d, loglik/NT = %f\n', num_iter, loglik/Tsum);
+ num_iter = num_iter + 1;
+
+ %%% M step
+
+ % Tsum = N*T
+ % Tsum1 = N*(T-1);
+ Tsum1 = Tsum - N;
+ A = beta * inv(gamma1);
+ %A = (gamma1' \ beta')';
+ Q = (gamma2 - A*beta') / Tsum1;
+ if diagQ
+ Q = diag(diag(Q));
+ end
+ if ~ARmode
+ C = delta * inv(gamma);
+ %C = (gamma' \ delta')';
+ R = (alpha - C*delta') / Tsum;
+ if diagR
+ R = diag(diag(R));
+ end
+ end
+ initx = x1sum / N;
+ initV = P1sum/N - initx*initx';
+
+ if ~isempty(constr_fun)
+ [A,C,Q,R,initx,initV] = feval(constr_fun, A, C, Q, R, initx, initV, varargin{:});
+ end
+
+ converged = em_converged(loglik, previous_loglik, thresh);
+ previous_loglik = loglik;
+end
+
+
+
+%%%%%%%%%
+
+function [beta, gamma, delta, gamma1, gamma2, x1, V1, loglik] = ...
+ Estep(y, A, C, Q, R, initx, initV, ARmode)
+%
+% Compute the (expected) sufficient statistics for a single Kalman filter sequence.
+%
+
+[os T] = size(y);
+ss = length(A);
+
+if ARmode
+ xsmooth = y;
+ Vsmooth = zeros(ss, ss, T); % no uncertainty about the hidden states
+ VVsmooth = zeros(ss, ss, T);
+ loglik = 0;
+else
+ [xsmooth, Vsmooth, VVsmooth, loglik] = kalman_smoother(y, A, C, Q, R, initx, initV);
+end
+
+delta = zeros(os, ss);
+gamma = zeros(ss, ss);
+beta = zeros(ss, ss);
+for t=1:T
+ delta = delta + y(:,t)*xsmooth(:,t)';
+ gamma = gamma + xsmooth(:,t)*xsmooth(:,t)' + Vsmooth(:,:,t);
+ if t>1 beta = beta + xsmooth(:,t)*xsmooth(:,t-1)' + VVsmooth(:,:,t); end
+end
+gamma1 = gamma - xsmooth(:,T)*xsmooth(:,T)' - Vsmooth(:,:,T);
+gamma2 = gamma - xsmooth(:,1)*xsmooth(:,1)' - Vsmooth(:,:,1);
+
+x1 = xsmooth(:,1);
+V1 = Vsmooth(:,:,1);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/learning_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/learning_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+% Make a point move in the 2D plane
+% State = (x y xdot ydot). We only observe (x y).
+% Generate data from this process, and try to learn the dynamics back.
+
+% X(t+1) = F X(t) + noise(Q)
+% Y(t) = H X(t) + noise(R)
+
+ss = 4; % state size
+os = 2; % observation size
+F = [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1];
+H = [1 0 0 0; 0 1 0 0];
+Q = 0.1*eye(ss);
+R = 1*eye(os);
+initx = [10 10 1 0]';
+initV = 10*eye(ss);
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+T = 100;
+[x,y] = sample_lds(F, H, Q, R, initx, T);
+
+% Initializing the params to sensible values is crucial.
+% Here, we use the true values for everything except F and H,
+% which we initialize randomly (bad idea!)
+% Lack of identifiability means the learned params. are often far from the true ones.
+% All that EM guarantees is that the likelihood will increase.
+F1 = randn(ss,ss);
+H1 = randn(os,ss);
+Q1 = Q;
+R1 = R;
+initx1 = initx;
+initV1 = initV;
+max_iter = 10;
+[F2, H2, Q2, R2, initx2, initV2, LL] = learn_kalman(y, F1, H1, Q1, R1, initx1, initV1, max_iter);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/sample_lds.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/sample_lds.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function [x,y] = sample_lds(F, H, Q, R, init_state, T, models, G, u)
+% SAMPLE_LDS Simulate a run of a (switching) stochastic linear dynamical system.
+% [x,y] = sample_lds(F, H, Q, R, init_state, T, models, G, u)
+%
+% x(t+1) = F*x(t) + G*u(t) + w(t), w ~ N(0, Q), x(0) = init_state
+% y(t) = H*x(t) + v(t), v ~ N(0, R)
+%
+% Input:
+% F(:,:,i) - the transition matrix for the i'th model
+% H(:,:,i) - the observation matrix for the i'th model
+% Q(:,:,i) - the transition covariance for the i'th model
+% R(:,:,i) - the observation covariance for the i'th model
+% init_state(:,i) - the initial mean for the i'th model
+% T - the num. time steps to run for
+%
+% Optional inputs:
+% models(t) - which model to use at time t. Default = ones(1,T)
+% G(:,:,i) - the input matrix for the i'th model. Default = 0.
+% u(:,t) - the input vector at time t. Default = zeros(1,T)
+%
+% Output:
+% x(:,t) - the hidden state vector at time t.
+% y(:,t) - the observation vector at time t.
+
+
+if ~iscell(F)
+ F = num2cell(F, [1 2]);
+ H = num2cell(H, [1 2]);
+ Q = num2cell(Q, [1 2]);
+ R = num2cell(R, [1 2]);
+end
+
+M = length(F);
+%T = length(models);
+
+if nargin < 7,
+ models = ones(1,T);
+end
+if nargin < 8,
+ G = num2cell(repmat(0, [1 1 M]));
+ u = zeros(1,T);
+end
+
+[os ss] = size(H{1});
+state_noise_samples = cell(1,M);
+obs_noise_samples = cell(1,M);
+for i=1:M
+ state_noise_samples{i} = sample_gaussian(zeros(length(Q{i}),1), Q{i}, T)';
+ obs_noise_samples{i} = sample_gaussian(zeros(length(R{i}),1), R{i}, T)';
+end
+
+x = zeros(ss, T);
+y = zeros(os, T);
+
+m = models(1);
+x(:,1) = init_state(:,m);
+y(:,1) = H{m}*x(:,1) + obs_noise_samples{m}(:,1);
+
+for t=2:T
+ m = models(t);
+ x(:,t) = F{m}*x(:,t-1) + G{m}*u(:,t-1) + state_noise_samples{m}(:,t);
+ y(:,t) = H{m}*x(:,t) + obs_noise_samples{m}(:,t);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/smooth_update.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/smooth_update.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function [xsmooth, Vsmooth, VVsmooth_future] = smooth_update(xsmooth_future, Vsmooth_future, ...
+ xfilt, Vfilt, Vfilt_future, VVfilt_future, A, Q, B, u)
+% One step of the backwards RTS smoothing equations.
+% function [xsmooth, Vsmooth, VVsmooth_future] = smooth_update(xsmooth_future, Vsmooth_future, ...
+% xfilt, Vfilt, Vfilt_future, VVfilt_future, A, Q, B, u)
+%
+% INPUTS:
+% xsmooth_future = E[X_t+1|T]
+% Vsmooth_future = Cov[X_t+1|T]
+% xfilt = E[X_t|t]
+% Vfilt = Cov[X_t|t]
+% Vfilt_future = Cov[X_t+1|t+1]
+% VVfilt_future = Cov[X_t+1,X_t|t+1]
+% A = system matrix for time t+1
+% Q = system covariance for time t+1
+% B = input matrix for time t+1 (or [] if none)
+% u = input vector for time t+1 (or [] if none)
+%
+% OUTPUTS:
+% xsmooth = E[X_t|T]
+% Vsmooth = Cov[X_t|T]
+% VVsmooth_future = Cov[X_t+1,X_t|T]
+
+%xpred = E[X(t+1) | t]
+if isempty(B)
+ xpred = A*xfilt;
+else
+ xpred = A*xfilt + B*u;
+end
+Vpred = A*Vfilt*A' + Q; % Vpred = Cov[X(t+1) | t]
+J = Vfilt * A' * inv(Vpred); % smoother gain matrix
+xsmooth = xfilt + J*(xsmooth_future - xpred);
+Vsmooth = Vfilt + J*(Vsmooth_future - Vpred)*J';
+VVsmooth_future = VVfilt_future + (Vsmooth_future - Vfilt_future)*inv(Vfilt_future)*VVfilt_future;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/testKalman.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/testKalman.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+tracking_demo
+learning_demo
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/Kalman/tracking_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/Kalman/tracking_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,74 @@
+% Make a point move in the 2D plane
+% State = (x y xdot ydot). We only observe (x y).
+
+% This code was used to generate Figure 15.9 of "Artificial Intelligence: a Modern Approach",
+% Russell and Norvig, 2nd edition, Prentice Hall, 2003.
+
+% X(t+1) = F X(t) + noise(Q)
+% Y(t) = H X(t) + noise(R)
+
+ss = 4; % state size
+os = 2; % observation size
+F = [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1];
+H = [1 0 0 0; 0 1 0 0];
+Q = 0.1*eye(ss);
+R = 1*eye(os);
+initx = [10 10 1 0]';
+initV = 10*eye(ss);
+
+seed = 9;
+rand('state', seed);
+randn('state', seed);
+T = 15;
+[x,y] = sample_lds(F, H, Q, R, initx, T);
+
+[xfilt, Vfilt, VVfilt, loglik] = kalman_filter(y, F, H, Q, R, initx, initV);
+[xsmooth, Vsmooth] = kalman_smoother(y, F, H, Q, R, initx, initV);
+
+dfilt = x([1 2],:) - xfilt([1 2],:);
+mse_filt = sqrt(sum(sum(dfilt.^2)))
+
+dsmooth = x([1 2],:) - xsmooth([1 2],:);
+mse_smooth = sqrt(sum(sum(dsmooth.^2)))
+
+
+figure(1)
+clf
+%subplot(2,1,1)
+hold on
+plot(x(1,:), x(2,:), 'ks-');
+plot(y(1,:), y(2,:), 'g*');
+plot(xfilt(1,:), xfilt(2,:), 'rx:');
+for t=1:T, plotgauss2d(xfilt(1:2,t), Vfilt(1:2, 1:2, t)); end
+hold off
+legend('true', 'observed', 'filtered', 3)
+xlabel('x')
+ylabel('y')
+
+
+
+% 3x3 inches
+set(gcf,'units','inches');
+set(gcf,'PaperPosition',[0 0 3 3])
+%print(gcf,'-depsc','/home/eecs/murphyk/public_html/Bayes/Figures/aima_filtered.eps');
+%print(gcf,'-djpeg','-r100', '/home/eecs/murphyk/public_html/Bayes/Figures/aima_filtered.jpg');
+
+
+figure(2)
+%subplot(2,1,2)
+hold on
+plot(x(1,:), x(2,:), 'ks-');
+plot(y(1,:), y(2,:), 'g*');
+plot(xsmooth(1,:), xsmooth(2,:), 'rx:');
+for t=1:T, plotgauss2d(xsmooth(1:2,t), Vsmooth(1:2, 1:2, t)); end
+hold off
+legend('true', 'observed', 'smoothed', 3)
+xlabel('x')
+ylabel('y')
+
+
+% 3x3 inches
+set(gcf,'units','inches');
+set(gcf,'PaperPosition',[0 0 3 3])
+%print(gcf,'-djpeg','-r100', '/home/eecs/murphyk/public_html/Bayes/Figures/aima_smoothed.jpg');
+%print(gcf,'-depsc','/home/eecs/murphyk/public_html/Bayes/Figures/aima_smoothed.eps');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/assocarray.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/subsref.m/1.1.1.1/Wed Aug 4 19:36:30 2004//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/@assocarray
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/@assocarray/assocarray.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/assocarray.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function A = assocarray(keys, vals)
+% ASSOCARRAY Make an associative array
+% function A = assocarray(keys, vals)
+%
+% keys{i} is the i'th string, vals{i} is the i'th value.
+% After construction, A('foo') will return the value associated with foo.
+
+A.keys = keys;
+A.vals = vals;
+A = class(A, 'assocarray');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/@assocarray/subsref.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/subsref.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function val = subsref(A, S)
+% SUBSREF Subscript reference for an associative array
+% A('foo') will return the value associated with foo.
+% If there are multiple identical keys, the first match is returned.
+% Currently the search is sequential.
+
+i = 1;
+while i <= length(A.keys)
+ if strcmp(S.subs{1}, A.keys{i})
+ val = A.vals{i};
+ return;
+ end
+ i = i + 1;
+end
+error(['can''t find ' S.subs{1}])
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/boolean_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@boolean_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/boolean_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/boolean_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,179 @@
+function CPD = boolean_CPD(bnet, self, ftype, fname, pfail)
+% BOOLEAN_CPD Make a tabular CPD representing a (noisy) boolean function
+%
+% CPD = boolean_cpd(bnet, self, 'inline', f) uses the inline function f
+% to specify the CPT.
+% e.g., suppose X4 = X2 AND (NOT X3). Then we can write
+% bnet.CPD{4} = boolean_CPD(bnet, 4, 'inline', inline('x(1) & ~x(2)'));
+% Note that x(1) refers to pvals(1) = X2, and x(2) refers to pvals(2)=X3.
+%
+% CPD = boolean_cpd(bnet, self, 'named', f) assumes f is a function name.
+% f can be built-in to matlab, or a file.
+% e.g., If X4 = X2 AND X3, we can write
+% bnet.CPD{4} = boolean_CPD(bnet, 4, 'named', 'and');
+% e.g., If X4 = X2 OR X3, we can write
+% bnet.CPD{4} = boolean_CPD(bnet, 4, 'named', 'any');
+%
+% CPD = boolean_cpd(bnet, self, 'rnd') makes a random non-redundant bool fn.
+%
+% CPD = boolean_CPD(bnet, self, 'inline'/'named', f, pfail)
+% will put probability mass 1-pfail on f(parents), and put pfail on the other value.
+% This is useful for simulating noisy boolean functions.
+% If pfail is omitted, it is set to 0.
+% (Note that adding noise to a random (non-redundant) boolean function just creates a different
+% (potentially redundant) random boolean function.)
+%
+% Note: This cannot be used to simulate a noisy-OR gate.
+% Example: suppose C has parents A and B, and the
+% link of A->C fails with prob pA and the link B->C fails with pB.
+% Then the noisy-OR gate defines the following distribution
+%
+% A B P(C=0)
+% 0 0 1.0
+% 1 0 pA
+% 0 1 pB
+% 1 1 pA * pB
+%
+% By contrast, boolean_CPD(bnet, C, 'any', p) would define
+%
+% A B P(C=0)
+% 0 0 1-p
+% 1 0 p
+% 0 1 p
+% 1 1 p
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = tabular_CPD(bnet, self);
+ return;
+elseif isa(bnet, 'boolean_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+
+if nargin < 5, pfail = 0; end
+
+ps = parents(bnet.dag, self);
+ns = bnet.node_sizes;
+psizes = ns(ps);
+self_size = ns(self);
+
+psucc = 1-pfail;
+
+k = length(ps);
+switch ftype
+ case 'inline', f = eval_bool_fn(fname, k);
+ case 'named', f = eval_bool_fn(fname, k);
+ case 'rnd', f = mk_rnd_bool_fn(k);
+ otherwise, error(['unknown function type ' ftype]);
+end
+
+CPT = zeros(prod(psizes), self_size);
+ndx = find(f==0);
+CPT(ndx, 1) = psucc;
+CPT(ndx, 2) = pfail;
+ndx = find(f==1);
+CPT(ndx, 2) = psucc;
+CPT(ndx, 1) = pfail;
+if k > 0
+ CPT = reshape(CPT, [psizes self_size]);
+end
+
+clamp = 1;
+CPD = tabular_CPD(bnet, self, CPT, [], clamp);
+
+
+
+%%%%%%%%%%%%
+
+function f = eval_bool_fn(fname, n)
+% EVAL_BOOL_FN Evaluate a boolean function on all bit vectors of length n
+% f = eval_bool_fn(fname, n)
+%
+% e.g. f = eval_bool_fn(inline('x(1) & x(3)'), 3)
+% returns 0 0 0 0 0 1 0 1
+
+ns = 2*ones(1, n);
+f = zeros(1, 2^n);
+bits = ind2subv(ns, 1:2^n);
+for i=1:2^n
+ f(i) = feval(fname, bits(i,:)-1);
+end
+
+%%%%%%%%%%%%%%%
+
+function f = mk_rnd_bool_fn(n)
+% MK_RND_BOOL_FN Make a random bit vector of length n that encodes a non-redundant boolean function
+% f = mk_rnd_bool_fn(n)
+
+red = 1;
+while red
+ f = sample_discrete([0.5 0.5], 2^n, 1)-1;
+ red = redundant_bool_fn(f);
+end
+
+%%%%%%%%
+
+
+function red = redundant_bool_fn(f)
+% REDUNDANT_BOOL_FN Does a boolean function depend on all its input values?
+% r = redundant_bool_fn(f)
+%
+% f is a vector of length 2^n, representing the output for each bit vector.
+% An input is redundant if there is no assignment to the other bits
+% which changes the output, e.g., input 1 is redundant if there is no u(2:n) s.t.
+% f([0 u(2:n)]) ~= f([1 u(2:n)]).
+% A function is redundant if it has any redundant inputs.
+
+n = log2(length(f));
+ns = 2*ones(1,n);
+red = 0;
+for i=1:n
+ ens = ns;
+ ens(i) = 1;
+ U = ind2subv(ens, 1:2^(n-1));
+ U(:,i) = 1;
+ f1 = f(subv2ind(ns, U));
+ U(:,i) = 2;
+ f2 = f(subv2ind(ns, U));
+ if isequal(f1, f2)
+ red = 1;
+ return;
+ end
+end
+
+
+%%%%%%%%%%
+
+function [b, iter] = rnd_truth_table(N)
+% RND_TRUTH_TABLE Construct the output of a random truth table s.t. each input is non-redundant
+% b = rnd_truth_table(N)
+%
+% N is the number of inputs.
+% b is a random bit string of length N, representing the output of the truth table.
+% Non-redundant means that, for each input position k,
+% there are at least two bit patterns, u and v, that differ only in the k'th position,
+% s.t., f(u) ~= f(v), where f is the function represented by b.
+% We use rejection sampling to ensure non-redundancy.
+%
+% Example: b = [0 0 0 1 0 0 0 1] is indep of 3rd input (AND of inputs 1 and 2)
+
+bits = ind2subv(2*ones(1,N), 1:2^N)-1;
+redundant = 1;
+iter = 0;
+while redundant & (iter < 4)
+ iter = iter + 1;
+ b = sample_discrete([0.5 0.5], 1, 2^N)-1;
+ redundant = 0;
+ for i=1:N
+ on = find(bits(:,i)==1);
+ off = find(bits(:,i)==0);
+ if isequal(b(on), b(off))
+ redundant = 1;
+ break;
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/deterministic_CPD.m/1.1.1.1/Mon Oct 7 13:26:36 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@deterministic_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/deterministic_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/deterministic_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function CPD = deterministic_CPD(bnet, self, fname, pfail)
+% DETERMINISTIC_CPD Make a tabular CPD representing a (noisy) deterministic function
+%
+% CPD = deterministic_CPD(bnet, self, fname)
+% This calls feval(fname, pvals) for each possible vector of parent values.
+% e.g., suppose there are 2 ternary parents, then pvals =
+% [1 1], [2 1], [3 1], [1 2], [2 2], [3 2], [1 3], [2 3], [3 3]
+% If v = feval(fname, pvals(i)), then
+% CPD(x | parents=pvals(i)) = 1 if x==v, and = 0 if x<>v
+% e.g., suppose X4 = X2 AND (NOT X3). Then
+% bnet.CPD{4} = deterministic_CPD(bnet, 4, inline('((x(1)-1) & ~(x(2)-1)) + 1'));
+% Note that x(1) refers to pvals(1) = X2, and x(2) refers to pvals(2)=X3
+% See also boolean_CPD.
+%
+% CPD = deterministic_CPD(bnet, self, fname, pfail)
+% will put probability mass 1-pfail on f(parents), and distribute pfail over the other values.
+% This is useful for simulating noisy deterministic functions.
+% If pfail is omitted, it is set to 0.
+%
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = tabular_CPD(bnet, self);
+ return;
+elseif isa(bnet, 'deterministic_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+
+if nargin < 4, pfail = 0; end
+
+ps = parents(bnet.dag, self);
+ns = bnet.node_sizes;
+psizes = ns(ps);
+self_size = ns(self);
+
+psucc = 1-pfail;
+
+CPT = zeros(prod(psizes), self_size);
+pvals = zeros(1, length(ps));
+for i=1:prod(psizes)
+ pvals = ind2subv(psizes, i);
+ x = feval(fname, pvals);
+ %fprintf('%d ', [pvals x]); fprintf('\n');
+ if psucc == 1
+ CPT(i, x) = 1;
+ else
+ CPT(i, x) = psucc;
+ rest = mysetdiff(1:self_size, x);
+ CPT(i, rest) = pfail/length(rest);
+ end
+end
+CPT = reshape(CPT, [psizes self_size]);
+
+CPD = tabular_CPD(bnet, self, 'CPT',CPT, 'clamped',1);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_lambda_msg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_lambda_msg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
+% CPD_TO_LAMBDA_MSG Compute lambda message (discrete)
+% lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
+% Pearl p183 eq 4.52
+
+switch msg_type
+ case 'd',
+ T = prod_CPT_and_pi_msgs(CPD, n, ps, msg, p);
+ mysize = length(msg{n}.lambda);
+ lambda = dpot(n, mysize, msg{n}.lambda);
+ T = multiply_by_pot(T, lambda);
+ lam_msg = pot_to_marginal(marginalize_pot(T, p));
+ lam_msg = lam_msg.T;
+ case 'g',
+ error('discrete_CPD can''t create Gaussian msgs')
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_pi.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_pi.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+% CPD_TO_PI Compute pi vector (discrete)
+% pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+% Pearl p183 eq 4.51
+
+switch msg_type
+ case 'd',
+ T = prod_CPT_and_pi_msgs(CPD, n, ps, msg);
+ pi = pot_to_marginal(marginalize_pot(T, n));
+ pi = pi.T(:);
+ case 'g',
+ error('can only convert discrete CPD to Gaussian pi if observed')
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_scgpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_scgpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function pot = CPD_to_scgpot(CPD, domain, ns, cnodes, evidence)
+% CPD_TO_SCGPOT Convert a CPD to a CG potential, incorporating any evidence (discrete)
+% pot = CPD_to_scgpot(CPD, domain, ns, cnodes, evidence)
+%
+% domain is the domain of CPD.
+% node_sizes(i) is the size of node i.
+% cnodes
+% evidence{i} is the evidence on the i'th node.
+
+%odom = domain(~isemptycell(evidence(domain)));
+
+%vals = cat(1, evidence{odom});
+%map = find_equiv_posns(odom, domain);
+%index = mk_multi_index(length(domain), map, vals);
+CPT = CPD_to_CPT(CPD);
+%CPT = CPT(index{:});
+CPT = CPT(:);
+%ns(odom) = 1;
+potarray = cell(1, length(CPT));
+for i=1:length(CPT)
+ %p = CPT(i);
+ potarray{i} = scgcpot(0, 0, CPT(i));
+ %scpot{i} = scpot(0, 0);
+end
+pot = scgpot(domain, [], [], ns, potarray);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/CPD_to_scgpot.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/README/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_CPD_to_table_hidden_ps.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_obs_CPD_to_table.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_to_pot.m/1.1.1.1/Fri Feb 20 22:00:38 2004//
+/convert_to_sparse_table.c/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_to_table.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/discrete_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/dom_sizes.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/log_prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/sample_node.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+A D/Old////
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@discrete_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_to_table.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/prob_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@discrete_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a tabular CPD to one or more potentials
+% pots = convert_to_pot(CPD, pot_type, domain, evidence)
+%
+% pots{i} = CPD evaluated using evidence(domain(:,i))
+% If 'domains' is a single row vector, pots will be an object, not a cell array.
+
+ncases = size(domain,2);
+assert(ncases==1); % not yet vectorized
+
+sz = dom_sizes(CPD);
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+local_ev = evidence(domain);
+obs_bitv = ~isemptycell(local_ev);
+odom = domain(obs_bitv);
+T = convert_to_table(CPD, domain, local_ev, obs_bitv);
+
+switch pot_type
+ case 'u',
+ pot = upot(domain, sz, T, 0*myones(sz));
+ case 'd',
+ ns(odom) = 1;
+ pot = dpot(domain, ns(domain), T);
+ case {'c','g'},
+ % Since we want the output to be a Gaussian, the whole family must be observed.
+ % In other words, the potential is really just a constant.
+ p = T;
+ %p = prob_node(CPD, evidence(domain(end)), evidence(domain(1:end-1)));
+ ns(domain) = 0;
+ pot = cpot(domain, ns(domain), log(p));
+ case 'cg',
+ T = T(:);
+ ns(odom) = 1;
+ can = cell(1, length(T));
+ for i=1:length(T)
+ can{i} = cpot([], [], log(T(i)));
+ end
+ pot = cgpot(domain, [], ns, can);
+ otherwise,
+ error(['unrecognized pot type ' pot_type])
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function T = convert_to_table(CPD, domain, local_ev, obs_bitv)
+% CONVERT_TO_TABLE Convert a discrete CPD to a table
+% function T = convert_to_table(CPD, domain, local_ev, obs_bitv)
+%
+% We convert the CPD to a CPT, and then lookup the evidence on the discrete parents.
+% The resulting table can easily be converted to a potential.
+
+
+CPT = CPD_to_CPT(CPD);
+obs_child_only = ~any(obs_bitv(1:end-1)) & obs_bitv(end);
+
+if obs_child_only
+ sz = size(CPT);
+ CPT = reshape(CPT, prod(sz(1:end-1)), sz(end));
+ o = local_ev{end};
+ T = CPT(:, o);
+else
+ odom = domain(obs_bitv);
+ vals = cat(1, local_ev{find(obs_bitv)}); % undo cell array
+ map = find_equiv_posns(odom, domain);
+ index = mk_multi_index(length(domain), map, vals);
+ T = CPT(index{:});
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function p = prob_CPD(CPD, domain, ns, cnodes, evidence)
+% PROB_CPD Compute prob of a node given evidence on the parents (discrete)
+% p = prob_CPD(CPD, domain, ns, cnodes, evidence)
+%
+% domain is the domain of CPD.
+% node_sizes(i) is the size of node i.
+% cnodes = all the cts nodes
+% evidence{i} is the evidence on the i'th node.
+
+ps = domain(1:end-1);
+self = domain(end);
+CPT = CPD_to_CPT(CPD);
+
+if isempty(ps)
+ T = CPT;
+else
+ assert(~any(isemptycell(evidence(ps))));
+ pvals = cat(1, evidence{ps});
+ i = subv2ind(ns(ps), pvals(:)');
+ T = reshape(CPT, [prod(ns(ps)) ns(self)]);
+ T = T(i,:);
+end
+p = T(evidence{self});
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+function [P, p] = prob_node(CPD, self_ev, pev)
+% PROB_NODE Compute prod_m P(x(i,m)| x(pi_i,m), theta_i) for node i (discrete)
+% [P, p] = prob_node(CPD, self_ev, pev)
+%
+% self_ev(m) is the evidence on this node in case m.
+% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents).
+% (These may also be cell arrays.)
+%
+% p(m) = P(x(i,m)| x(pi_i,m), theta_i)
+% P = prod p(m)
+
+if iscell(self_ev), usecell = 1; else usecell = 0; end
+
+ncases = length(self_ev);
+sz = dom_sizes(CPD);
+
+nparents = length(sz)-1;
+if nparents == 0
+ assert(isempty(pev));
+else
+ assert(isequal(size(pev), [nparents ncases]));
+end
+
+n = length(sz);
+dom = 1:n;
+p = zeros(1, ncases);
+if nparents == 0
+ for m=1:ncases
+ if usecell
+ evidence = {self_ev{m}};
+ else
+ evidence = num2cell(self_ev(m));
+ end
+ T = convert_to_table(CPD, dom, evidence);
+ p(m) = T;
+ end
+else
+ for m=1:ncases
+ if usecell
+ evidence = cell(1,n);
+ evidence(1:n-1) = pev(:,m);
+ evidence(n) = self_ev(m);
+ else
+ evidence = num2cell([pev(:,m)', self_ev(m)]);
+ end
+ T = convert_to_table(CPD, dom, evidence);
+ p(m) = T;
+ end
+end
+P = prod(p);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+Any CPD on a discrete child with discrete parents
+can be represented as a table (although this might be quite big).
+discrete_CPD uses this tabular representation to implement various
+functions. Subtypes are free to implement more efficient versions.
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function T = convert_CPD_to_table_hidden_ps(CPD, child_obs)
+% CONVERT_CPD_TO_TABLE_HIDDEN_PS Convert a discrete CPD to a table
+% T = convert_CPD_to_table_hidden_ps(CPD, child_obs)
+%
+% This is like convert_to_table, except that we are guaranteed that
+% none of the parents have evidence on them.
+% child_obs may be an integer (1,2,...) or [].
+
+CPT = CPD_to_CPT(CPD);
+if isempty(child_obs)
+ T = CPT(:);
+else
+ sz = dom_sizes(CPD);
+ if length(sz)==1 % no parents
+ T = CPT(child_obs);
+ else
+ CPT = reshape(CPT, prod(sz(1:end-1)), sz(end));
+ T = CPT(:, child_obs);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function T = convert_to_table(CPD, domain, evidence)
+% CONVERT_TO_TABLE Convert a discrete CPD to a table
+% T = convert_to_table(CPD, domain, evidence)
+%
+% We convert the CPD to a CPT, and then lookup the evidence on the discrete parents.
+% The resulting table can easily be converted to a potential.
+
+CPT = CPD_to_CPT(CPD);
+odom = domain(~isemptycell(evidence(domain)));
+vals = cat(1, evidence{odom});
+map = find_equiv_posns(odom, domain);
+index = mk_multi_index(length(domain), map, vals);
+T = CPT(index{:});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a discrete CPD to a potential
+% pot = convert_to_pot(CPD, pot_type, domain, evidence)
+%
+% pots = CPD evaluated using evidence(domain)
+
+ncases = size(domain,2);
+assert(ncases==1); % not yet vectorized
+
+sz = dom_sizes(CPD);
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+CPT1 = CPD_to_CPT(CPD);
+spar = issparse(CPT1);
+odom = domain(~isemptycell(evidence(domain)));
+if spar
+ T = convert_to_sparse_table(CPD, domain, evidence);
+else
+ T = convert_to_table(CPD, domain, evidence);
+end
+
+switch pot_type
+ case 'u',
+ pot = upot(domain, sz, T, 0*myones(sz));
+ case 'd',
+ ns(odom) = 1;
+ pot = dpot(domain, ns(domain), T);
+ case {'c','g'},
+ % Since we want the output to be a Gaussian, the whole family must be observed.
+ % In other words, the potential is really just a constant.
+ p = T;
+ %p = prob_node(CPD, evidence(domain(end)), evidence(domain(1:end-1)));
+ ns(domain) = 0;
+ pot = cpot(domain, ns(domain), log(p));
+
+ case 'cg',
+ T = T(:);
+ ns(odom) = 1;
+ can = cell(1, length(T));
+ for i=1:length(T)
+ if T(i) == 0
+ can{i} = cpot([], [], -Inf); % bug fix by Bob Welch 20/2/04
+ else
+ can{i} = cpot([], [], log(T(i)));
+ end;
+ end
+ pot = cgpot(domain, [], ns, can);
+
+ case 'scg'
+ T = T(:);
+ ns(odom) = 1;
+ pot_array = cell(1, length(T));
+ for i=1:length(T)
+ pot_array{i} = scgcpot([], [], T(i));
+ end
+ pot = scgpot(domain, [], [], ns, pot_array);
+
+ otherwise,
+ error(['unrecognized pot type ' pot_type])
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_sparse_table.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_sparse_table.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,154 @@
+/* convert_to_sparse_table.c convert a sparse discrete CPD with evidence into sparse table */
+/* convert_to_pot.m located in ../CPDs/discrete_CPD call it */
+/* 3 input */
+/* CPD prhs[0] with 1D sparse CPT */
+/* domain prhs[1] */
+/* evidence prhs[2] */
+/* 1 output */
+/* T plhs[0] sparse table */
+
+#include
+#include "mex.h"
+
+void ind_subv(int index, const int *cumprod, const int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i 1e-3) | isinf(P)
+ if isinf(P) % Y is observed
+ Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance
+ mu_lambda = msg{n}.lambda.mu; % observed_value;
+ else
+ Sigma_lambda = inv(P);
+ mu_lambda = Sigma_lambda * msg{n}.lambda.info_state;
+ end
+ C = inv(Sigma_lambda + BSigma);
+ lam_msg.precision = Bi' * C * Bi;
+ lam_msg.info_state = Bi' * C * (mu_lambda - Bmu);
+ else
+ % method that uses matrix inversion lemma to avoid inverting P
+ A = inv(P + inv(BSigma));
+ C = P - P*A*P;
+ lam_msg.precision = Bi' * C * Bi;
+ D = eye(self_size) - P*A;
+ z = msg{n}.lambda.info_state;
+ lam_msg.info_state = Bi' * (D*z - D*P*Bmu);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_pi.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_pi.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+% CPD_TO_PI Compute the pi vector (gaussian)
+% function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+
+switch msg_type
+ case 'd',
+ error('gaussian_CPD can''t create discrete msgs')
+ case 'g',
+ [m, Q, W] = gaussian_CPD_params_given_dps(CPD, [ps n], evidence);
+ cps = ps(CPD.cps);
+ cpsizes = CPD.sizes(CPD.cps);
+ pi.mu = m;
+ pi.Sigma = Q;
+ for k=1:length(cps) % only get pi msgs from cts parents
+ %bk = block(k, cpsizes);
+ bk = CPD.cps_block_ndx{k};
+ Bk = W(:, bk);
+ m = msg{n}.pi_from_parent{k};
+ pi.Sigma = pi.Sigma + Bk * m.Sigma * Bk';
+ pi.mu = pi.mu + Bk * m.mu; % m.mu = u(k)
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_scgpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_scgpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function pot = CPD_to_scgpot(CPD, domain, ns, cnodes, evidence)
+% CPD_TO_CGPOT Convert a Gaussian CPD to a CG potential, incorporating any evidence
+% pot = CPD_to_cgpot(CPD, domain, ns, cnodes, evidence)
+
+self = CPD.self;
+dnodes = mysetdiff(1:length(ns), cnodes);
+odom = domain(~isemptycell(evidence(domain)));
+cdom = myintersect(cnodes, domain);
+cheaddom = myintersect(self, domain);
+ctaildom = mysetdiff(cdom,cheaddom);
+ddom = myintersect(dnodes, domain);
+cobs = myintersect(cdom, odom);
+dobs = myintersect(ddom, odom);
+ens = ns; % effective node size
+ens(cobs) = 0;
+ens(dobs) = 1;
+
+% Extract the params compatible with the observations (if any) on the discrete parents (if any)
+% parents are all but the last domain element
+ps = domain(1:end-1);
+dps = myintersect(ps, ddom);
+dops = myintersect(dps, odom);
+
+map = find_equiv_posns(dops, dps);
+dpvals = cat(1, evidence{dops});
+index = mk_multi_index(length(dps), map, dpvals);
+
+dpsize = prod(ens(dps));
+cpsize = size(CPD.weights(:,:,1), 2); % cts parents size
+ss = size(CPD.mean, 1); % self size
+% the reshape acts like a squeeze
+m = reshape(CPD.mean(:, index{:}), [ss dpsize]);
+C = reshape(CPD.cov(:, :, index{:}), [ss ss dpsize]);
+W = reshape(CPD.weights(:, :, index{:}), [ss cpsize dpsize]);
+
+
+% Convert each conditional Gaussian to a canonical potential
+pot = cell(1, dpsize);
+for i=1:dpsize
+ %pot{i} = linear_gaussian_to_scgcpot(m(:,i), C(:,:,i), W(:,:,i), cdom, ns, cnodes, evidence);
+ pot{i} = scgcpot(ss, cpsize, 1, m(:,i), W(:,:,i), C(:,:,i));
+end
+
+pot = scgpot(ddom, cheaddom, ctaildom, ens, pot);
+
+
+function pot = linear_gaussian_to_scgcpot(mu, Sigma, W, domain, ns, cnodes, evidence)
+% LINEAR_GAUSSIAN_TO_CPOT Convert a linear Gaussian CPD to a stable conditional potential element.
+% pot = linear_gaussian_to_cpot(mu, Sigma, W, domain, ns, cnodes, evidence)
+
+p = 1;
+A = mu;
+B = W;
+C = Sigma;
+ns(odom) = 0;
+%pot = scgcpot(, ns(domain), p, A, B, C);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/CPD_to_scgpot.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/adjustable_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_CPD_to_table_hidden_ps.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/convert_to_pot.m/1.1.1.1/Sun Mar 9 23:03:16 2003//
+/convert_to_table.m/1.1.1.1/Sun May 11 23:31:54 2003//
+/display.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/gaussian_CPD.m/1.1.1.1/Wed Jun 15 21:13:06 2005//
+/gaussian_CPD_params_given_dps.m/1.1.1.1/Sun May 11 23:13:40 2003//
+/get_field.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/learn_params.m/1.1.1.1/Thu Jun 10 01:28:10 2004//
+/log_prob_node.m/1.1.1.1/Tue Sep 10 17:44:00 2002//
+/maximize_params.m/1.1.1.1/Tue May 20 14:10:06 2003//
+/maximize_params_debug.m/1.1.1.1/Fri Jan 31 00:13:10 2003//
+/reset_ess.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/sample_node.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/update_ess.m/1.1.1.1/Tue Jul 22 22:55:46 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+A D/Old////
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@gaussian_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,64 @@
+function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p)
+% CPD_TO_LAMBDA_MSG Compute lambda message (gaussian)
+% lam_msg = compute_lambda_msg(CPD, msg_type, n, ps, msg, p)
+% Pearl p183 eq 4.52
+
+switch msg_type
+ case 'd',
+ error('gaussian_CPD can''t create discrete msgs')
+ case 'g',
+ self_size = CPD.sizes(end);
+ if all(msg{n}.lambda.precision == 0) % no info to send on
+ lam_msg.precision = zeros(self_size);
+ lam_msg.info_state = zeros(self_size, 1);
+ return;
+ end
+ cpsizes = CPD.sizes(CPD.cps);
+ dpval = 1;
+ Q = CPD.cov(:,:,dpval);
+ Sigmai = Q;
+ wmu = zeros(self_size, 1);
+ for k=1:length(ps)
+ pk = ps(k);
+ if pk ~= p
+ bk = block(k, cpsizes);
+ Bk = CPD.weights(:, bk, dpval);
+ m = msg{n}.pi_from_parent{k};
+ Sigmai = Sigmai + Bk * m.Sigma * Bk';
+ wmu = wmu + Bk * m.mu; % m.mu = u(k)
+ end
+ end
+ % Sigmai = Q + sum_{k \neq i} B_k Sigma_k B_k'
+ i = find_equiv_posns(p, ps);
+ bi = block(i, cpsizes);
+ Bi = CPD.weights(:,bi, dpval);
+
+ if 0
+ P = msg{n}.lambda.precision;
+ if isinf(P) % inv(P)=Sigma_lambda=0
+ precision_temp = inv(Sigmai);
+ lam_msg.precision = Bi' * precision_temp * Bi;
+ lam_msg.info_state = precision_temp * (msg{n}.lambda.mu - wmu);
+ else
+ A = inv(P + inv(Sigmai));
+ precision_temp = P + P*A*P;
+ lam_msg.precision = Bi' * precision_temp * Bi;
+ self_size = length(P);
+ C = eye(self_size) + P*A;
+ z = msg{n}.lambda.info_state;
+ lam_msg.info_state = C*z - C*P*wmu;
+ end
+ end
+
+ if isinf(msg{n}.lambda.precision)
+ Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance
+ mu_lambda = msg{n}.lambda.mu; % observed_value;
+ else
+ Sigma_lambda = inv(msg{n}.lambda.precision);
+ mu_lambda = Sigma_lambda * msg{n}.lambda.info_state;
+ end
+ precision_temp = inv(Sigma_lambda + Sigmai);
+ lam_msg.precision = Bi' * precision_temp * Bi;
+ lam_msg.info_state = Bi' * precision_temp * (mu_lambda - wmu);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/gaussian_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/log_prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/maximize_params.m/1.1.1.1/Thu Jan 30 22:38:16 2003//
+/update_ess.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/update_tied_ess.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@gaussian_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/gaussian_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/gaussian_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,184 @@
+function CPD = gaussian_CPD(varargin)
+% GAUSSIAN_CPD Make a conditional linear Gaussian distrib.
+%
+% To define this CPD precisely, call the continuous (cts) parents (if any) X,
+% the discrete parents (if any) Q, and this node Y. Then the distribution on Y is:
+% - no parents: Y ~ N(mu, Sigma)
+% - cts parents : Y|X=x ~ N(mu + W x, Sigma)
+% - discrete parents: Y|Q=i ~ N(mu(i), Sigma(i))
+% - cts and discrete parents: Y|X=x,Q=i ~ N(mu(i) + W(i) x, Sigma(i))
+%
+% CPD = gaussian_CPD(bnet, node, ...) will create a CPD with random parameters,
+% where node is the number of a node in this equivalence class.
+%
+% The list below gives optional arguments [default value in brackets].
+% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y) and Q = prod(ns(Q)).)
+%
+% mean - mu(:,i) is the mean given Q=i [ randn(Y,Q) ]
+% cov - Sigma(:,:,i) is the covariance given Q=i [ repmat(eye(Y,Y), [1 1 Q]) ]
+% weights - W(:,:,i) is the regression matrix given Q=i [ randn(Y,X,Q) ]
+% cov_type - if 'diag', Sigma(:,:,i) is diagonal [ 'full' ]
+% tied_cov - if 1, we constrain Sigma(:,:,i) to be the same for all i [0]
+% clamp_mean - if 1, we do not adjust mu(:,i) during learning [0]
+% clamp_cov - if 1, we do not adjust Sigma(:,:,i) during learning [0]
+% clamp_weights - if 1, we do not adjust W(:,:,i) during learning [0]
+% cov_prior_weight - weight given to I prior for estimating Sigma [0.01]
+%
+% e.g., CPD = gaussian_CPD(bnet, i, 'mean', [0; 0], 'clamp_mean', 'yes')
+%
+% For backwards compatibility with BNT2, you can also specify the parameters in the following order
+% CPD = gaussian_CPD(bnet, self, mu, Sigma, W, cov_type, tied_cov, clamp_mean, clamp_cov, clamp_weight)
+%
+% Sometimes it is useful to create an "isolated" CPD, without needing to pass in a bnet.
+% In this case, you must specify the discrete and cts parents (dps, cps) and the family sizes, followed
+% by the optional arguments above:
+% CPD = gaussian_CPD('self', i, 'dps', dps, 'cps', cps, 'sz', fam_size, ...)
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ clamp = 0;
+ CPD = class(CPD, 'gaussian_CPD', generic_CPD(clamp));
+ return;
+elseif isa(varargin{1}, 'gaussian_CPD')
+ % This might occur if we are copying an object.
+ CPD = varargin{1};
+ return;
+end
+CPD = init_fields;
+
+CPD = class(CPD, 'gaussian_CPD', generic_CPD(0));
+
+
+% parse mandatory arguments
+if ~isstr(varargin{1}) % pass in bnet
+ bnet = varargin{1};
+ self = varargin{2};
+ args = varargin(3:end);
+ ns = bnet.node_sizes;
+ ps = parents(bnet.dag, self);
+ dps = myintersect(ps, bnet.dnodes);
+ cps = myintersect(ps, bnet.cnodes);
+ fam_sz = ns([ps self]);
+else
+ disp('parsing new style')
+ for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'self', self = varargin{i+1};
+ case 'dps', dps = varargin{i+1};
+ case 'cps', cps = varargin{i+1};
+ case 'sz', fam_sz = varargin{i+1};
+ end
+ end
+ ps = myunion(dps, cps);
+ args = varargin;
+end
+
+CPD.self = self;
+CPD.sizes = fam_sz;
+
+% Figure out which (if any) of the parents are discrete, and which cts, and how big they are
+% dps = discrete parents, cps = cts parents
+CPD.cps = find_equiv_posns(cps, ps); % cts parent index
+CPD.dps = find_equiv_posns(dps, ps);
+ss = fam_sz(end);
+psz = fam_sz(1:end-1);
+dpsz = prod(psz(CPD.dps));
+cpsz = sum(psz(CPD.cps));
+
+% set default params
+CPD.mean = randn(ss, dpsz);
+CPD.cov = 100*repmat(eye(ss), [1 1 dpsz]);
+CPD.weights = randn(ss, cpsz, dpsz);
+CPD.cov_type = 'full';
+CPD.tied_cov = 0;
+CPD.clamped_mean = 0;
+CPD.clamped_cov = 0;
+CPD.clamped_weights = 0;
+CPD.cov_prior_weight = 0.01;
+
+nargs = length(args);
+if nargs > 0
+ if ~isstr(args{1})
+ % gaussian_CPD(bnet, self, mu, Sigma, W, cov_type, tied_cov, clamp_mean, clamp_cov, clamp_weights)
+ if nargs >= 1 & ~isempty(args{1}), CPD.mean = args{1}; end
+ if nargs >= 2 & ~isempty(args{2}), CPD.cov = args{2}; end
+ if nargs >= 3 & ~isempty(args{3}), CPD.weights = args{3}; end
+ if nargs >= 4 & ~isempty(args{4}), CPD.cov_type = args{4}; end
+ if nargs >= 5 & ~isempty(args{5}) & strcmp(args{5}, 'tied'), CPD.tied_cov = 1; end
+ if nargs >= 6 & ~isempty(args{6}), CPD.clamped_mean = 1; end
+ if nargs >= 7 & ~isempty(args{7}), CPD.clamped_cov = 1; end
+ if nargs >= 8 & ~isempty(args{8}), CPD.clamped_weights = 1; end
+ else
+ CPD = set_fields(CPD, args{:});
+ end
+end
+
+% Make sure the matrices have 1 dimension per discrete parent.
+% Bug fix due to Xuejing Sun 3/6/01
+CPD.mean = myreshape(CPD.mean, [ss ns(dps)]);
+CPD.cov = myreshape(CPD.cov, [ss ss ns(dps)]);
+CPD.weights = myreshape(CPD.weights, [ss cpsz ns(dps)]);
+
+CPD.init_cov = CPD.cov; % we reset to this if things go wrong during learning
+
+% expected sufficient statistics
+CPD.Wsum = zeros(dpsz,1);
+CPD.WYsum = zeros(ss, dpsz);
+CPD.WXsum = zeros(cpsz, dpsz);
+CPD.WYYsum = zeros(ss, ss, dpsz);
+CPD.WXXsum = zeros(cpsz, cpsz, dpsz);
+CPD.WXYsum = zeros(cpsz, ss, dpsz);
+
+% For BIC
+CPD.nsamples = 0;
+switch CPD.cov_type
+ case 'full',
+ ncov_params = ss*(ss-1)/2; % since symmetric (and positive definite)
+ case 'diag',
+ ncov_params = ss;
+ otherwise
+ error(['unrecognized cov_type ' cov_type]);
+end
+% params = weights + mean + cov
+if CPD.tied_cov
+ CPD.nparams = ss*cpsz*dpsz + ss*dpsz + ncov_params;
+else
+ CPD.nparams = ss*cpsz*dpsz + ss*dpsz + dpsz*ncov_params;
+end
+
+
+
+clamped = CPD.clamped_mean & CPD.clamped_cov & CPD.clamped_weights;
+CPD = set_clamped(CPD, clamped);
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.sizes = [];
+CPD.cps = [];
+CPD.dps = [];
+CPD.mean = [];
+CPD.cov = [];
+CPD.weights = [];
+CPD.clamped_mean = [];
+CPD.clamped_cov = [];
+CPD.clamped_weights = [];
+CPD.init_cov = [];
+CPD.cov_type = [];
+CPD.tied_cov = [];
+CPD.Wsum = [];
+CPD.WYsum = [];
+CPD.WXsum = [];
+CPD.WYYsum = [];
+CPD.WXXsum = [];
+CPD.WXYsum = [];
+CPD.nsamples = [];
+CPD.nparams = [];
+CPD.cov_prior_weight = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/log_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/log_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function L = log_prob_node(CPD, self_ev, pev)
+% LOG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m), theta_i) for node i (gaussian)
+% L = log_prob_node(CPD, self_ev, pev)
+%
+% self_ev(m) is the evidence on this node in case m.
+% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents).
+% (These may also be cell arrays.)
+
+if iscell(self_ev), usecell = 1; else usecell = 0; end
+
+use_log = 1;
+ncases = length(self_ev);
+nparents = length(CPD.sizes)-1;
+assert(ncases == size(pev, 2));
+
+if ncases == 0
+ L = 0;
+ return;
+end
+
+if length(CPD.dps)==0 % no discrete parents, so we can vectorize
+ i = 1;
+ if usecell
+ Y = cell2num(self_ev);
+ else
+ Y = self_ev;
+ end
+ if length(CPD.cps) == 0
+ L = gaussian_prob(Y, CPD.mean(:,i), CPD.cov(:,:,i), use_log);
+ else
+ if usecell
+ X = cell2num(pev);
+ else
+ X = pev;
+ end
+ L = gaussian_prob(Y, CPD.mean(:,i) + CPD.weights(:,:,i)*X, CPD.cov(:,:,i), use_log);
+ end
+else % each case uses a (potentially) different set of parameters
+ L = 0;
+ for m=1:ncases
+ if usecell
+ dpvals = cat(1, pev{CPD.dps, m});
+ else
+ dpvals = pev(CPD.dps, m);
+ end
+ i = subv2ind(CPD.sizes(CPD.dps), dpvals(:)');
+ y = self_ev{m};
+ if length(CPD.cps) == 0
+ L = L + gaussian_prob(y, CPD.mean(:,i), CPD.cov(:,:,i), use_log);
+ else
+ if usecell
+ x = cat(1, pev{CPD.cps, m});
+ else
+ x = pev(CPD.cps, m);
+ end
+ L = L + gaussian_prob(y, CPD.mean(:,i) + CPD.weights(:,:,i)*x, CPD.cov(:,:,i), use_log);
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,147 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian)
+% CPD = maximize_params(CPD, temperature)
+%
+% Temperature is currently only used for entropic prior on Sigma
+
+% For details, see "Fitting a Conditional Gaussian Distribution", Kevin Murphy, tech. report,
+% 1998, available at www.cs.berkeley.edu/~murphyk/papers.html
+% Refering to table 2, we use equations 1/2 to estimate the covariance matrix in the untied/tied case,
+% and equation 9 to estimate the weight matrix and mean.
+% We do not implement spherical Gaussians - the code is already pretty complicated!
+
+if ~adjustable_CPD(CPD), return; end
+
+%assert(approxeq(CPD.nsamples, sum(CPD.Wsum)));
+assert(~any(isnan(CPD.WXXsum)))
+assert(~any(isnan(CPD.WXYsum)))
+assert(~any(isnan(CPD.WYYsum)))
+
+[self_size cpsize dpsize] = size(CPD.weights);
+
+% Append 1s to the parents, and derive the corresponding cross products.
+% This is used when estimate the means and weights simultaneosuly,
+% and when estimatting Sigma.
+% Let x2 = [x 1]'
+XY = zeros(cpsize+1, self_size, dpsize); % XY(:,:,i) = sum_l w(l,i) x2(l) y(l)'
+XX = zeros(cpsize+1, cpsize+1, dpsize); % XX(:,:,i) = sum_l w(l,i) x2(l) x2(l)'
+YY = zeros(self_size, self_size, dpsize); % YY(:,:,i) = sum_l w(l,i) y(l) y(l)'
+for i=1:dpsize
+ XY(:,:,i) = [CPD.WXYsum(:,:,i) % X*Y
+ CPD.WYsum(:,i)']; % 1*Y
+ % [x * [x' 1] = [xx' x
+ % 1] x' 1]
+ XX(:,:,i) = [CPD.WXXsum(:,:,i) CPD.WXsum(:,i);
+ CPD.WXsum(:,i)' CPD.Wsum(i)];
+ YY(:,:,i) = CPD.WYYsum(:,:,i);
+end
+
+w = CPD.Wsum(:);
+% Set any zeros to one before dividing
+% This is valid because w(i)=0 => WYsum(:,i)=0, etc
+w = w + (w==0);
+
+if CPD.clamped_mean
+ % Estimating B2 and then setting the last column (the mean) to the clamped mean is *not* equivalent
+ % to estimating B and then adding the clamped_mean to the last column.
+ if ~CPD.clamped_weights
+ B = zeros(self_size, cpsize, dpsize);
+ for i=1:dpsize
+ if det(CPD.WXXsum(:,:,i))==0
+ B(:,:,i) = 0;
+ else
+ % Eqn 9 in table 2 of TR
+ %B(:,:,i) = CPD.WXYsum(:,:,i)' * inv(CPD.WXXsum(:,:,i));
+ B(:,:,i) = (CPD.WXXsum(:,:,i) \ CPD.WXYsum(:,:,i))';
+ end
+ end
+ %CPD.weights = reshape(B, [self_size cpsize dpsize]);
+ CPD.weights = B;
+ end
+elseif CPD.clamped_weights % KPM 1/25/02
+ if ~CPD.clamped_mean % ML estimate is just sample mean of the residuals
+ for i=1:dpsize
+ CPD.mean(:,i) = (CPD.WYsum(:,i) - CPD.weights(:,:,i) * CPD.WXsum(:,i)) / w(i);
+ end
+ end
+else % nothing is clamped, so estimate mean and weights simultaneously
+ B2 = zeros(self_size, cpsize+1, dpsize);
+ for i=1:dpsize
+ if det(XX(:,:,i))==0 % fix by U. Sondhauss 6/27/99
+ B2(:,:,i)=0;
+ else
+ % Eqn 9 in table 2 of TR
+ %B2(:,:,i) = XY(:,:,i)' * inv(XX(:,:,i));
+ B2(:,:,i) = (XX(:,:,i) \ XY(:,:,i))';
+ end
+ CPD.mean(:,i) = B2(:,cpsize+1,i);
+ CPD.weights(:,:,i) = B2(:,1:cpsize,i);
+ end
+end
+
+% Let B2 = [W mu]
+if cpsize>0
+ B2(:,1:cpsize,:) = reshape(CPD.weights, [self_size cpsize dpsize]);
+end
+B2(:,cpsize+1,:) = reshape(CPD.mean, [self_size dpsize]);
+
+% To avoid singular covariance matrices,
+% we use the regularization method suggested in "A Quasi-Bayesian approach to estimating
+% parameters for mixtures of normal distributions", Hamilton 91.
+% If the ML estimate is Sigma = M/N, the MAP estimate is (M+gamma*I) / (N+gamma),
+% where gamma >=0 is a smoothing parameter (equivalent sample size of I prior)
+
+gamma = CPD.cov_prior_weight;
+
+if ~CPD.clamped_cov
+ if CPD.cov_prior_entropic % eqn 12 of Brand AI/Stat 99
+ Z = 1-temp;
+ % When temp > 1, Z is negative, so we are dividing by a smaller
+ % number, ie. increasing the variance.
+ else
+ Z = 0;
+ end
+ if CPD.tied_cov
+ S = zeros(self_size, self_size);
+ % Eqn 2 from table 2 in TR
+ for i=1:dpsize
+ S = S + (YY(:,:,i) - B2(:,:,i)*XY(:,:,i));
+ end
+ %denom = max(1, CPD.nsamples + gamma + Z);
+ denom = CPD.nsamples + gamma + Z;
+ S = (S + gamma*eye(self_size)) / denom;
+ if strcmp(CPD.cov_type, 'diag')
+ S = diag(diag(S));
+ end
+ CPD.cov = repmat(S, [1 1 dpsize]);
+ else
+ for i=1:dpsize
+ % Eqn 1 from table 2 in TR
+ S = YY(:,:,i) - B2(:,:,i)*XY(:,:,i);
+ %denom = max(1, w(i) + gamma + Z); % gives wrong answers on mhmm1
+ denom = w(i) + gamma + Z;
+ S = (S + gamma*eye(self_size)) / denom;
+ CPD.cov(:,:,i) = S;
+ end
+ if strcmp(CPD.cov_type, 'diag')
+ for i=1:dpsize
+ CPD.cov(:,:,i) = diag(diag(CPD.cov(:,:,i)));
+ end
+ end
+ end
+end
+
+
+check_covars = 0;
+min_covar = 1e-5;
+if check_covars % prevent collapsing to a point
+ for i=1:dpsize
+ if min(svd(CPD.cov(:,:,i))) < min_covar
+ disp(['resetting singular covariance for node ' num2str(CPD.self)]);
+ CPD.cov(:,:,i) = CPD.init_cov(:,:,i);
+ end
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,85 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a Gaussian node
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+
+%if nargin < 6
+% hidden_bitv = zeros(1, max(fmarginal.domain));
+% hidden_bitv(find(isempty(evidence)))=1;
+%end
+
+dom = fmarginal.domain;
+self = dom(end);
+ps = dom(1:end-1);
+hidden_self = hidden_bitv(self);
+cps = myintersect(ps, cnodes);
+dps = mysetdiff(ps, cps);
+hidden_cps = all(hidden_bitv(cps));
+hidden_dps = all(hidden_bitv(dps));
+
+CPD.nsamples = CPD.nsamples + 1;
+[ss cpsz dpsz] = size(CPD.weights); % ss = self size
+
+% Let X be the cts parent (if any), Y be the cts child (self).
+
+if ~hidden_self & (isempty(cps) | ~hidden_cps) & hidden_dps % all cts nodes are observed, all discrete nodes are hidden
+ % Since X and Y are observed, SYY = 0, SXX = 0, SXY = 0
+ % Since discrete parents are hidden, we do not need to add evidence to w.
+ w = fmarginal.T(:);
+ CPD.Wsum = CPD.Wsum + w;
+ y = evidence{self};
+ Cyy = y*y';
+ if ~CPD.useC
+ W = repmat(w(:)',ss,1); % W(y,i) = w(i)
+ W2 = repmat(reshape(W, [ss 1 dpsz]), [1 ss 1]); % W2(x,y,i) = w(i)
+ CPD.WYsum = CPD.WYsum + W .* repmat(y(:), 1, dpsz);
+ CPD.WYYsum = CPD.WYYsum + W2 .* repmat(reshape(Cyy, [ss ss 1]), [1 1 dpsz]);
+ else
+ W = w(:)';
+ W2 = reshape(W, [1 1 dpsz]);
+ CPD.WYsum = CPD.WYsum + rep_mult(W, y(:), size(CPD.WYsum));
+ CPD.WYYsum = CPD.WYYsum + rep_mult(W2, Cyy, size(CPD.WYYsum));
+ end
+ if cpsz > 0 % X exists
+ x = cat(1, evidence{cps}); x = x(:);
+ Cxx = x*x';
+ Cxy = x*y';
+ if ~CPD.useC
+ CPD.WXsum = CPD.WXsum + W .* repmat(x(:), 1, dpsz);
+ CPD.WXXsum = CPD.WXXsum + W2 .* repmat(reshape(Cxx, [cpsz cpsz 1]), [1 1 dpsz]);
+ CPD.WXYsum = CPD.WXYsum + W2 .* repmat(reshape(Cxy, [cpsz ss 1]), [1 1 dpsz]);
+ else
+ CPD.WXsum = CPD.WXsum + rep_mult(W, x(:), size(CPD.WXsum));
+ CPD.WXXsum = CPD.WXXsum + rep_mult(W2, Cxx, size(CPD.WXXsum));
+ CPD.WXYsum = CPD.WXYsum + rep_mult(W2, Cxy, size(CPD.WXYsum));
+ end
+ end
+ return;
+end
+
+% general (non-vectorized) case
+fullm = add_evidence_to_gmarginal(fmarginal, evidence, ns, cnodes); % slow!
+
+if dpsz == 1 % no discrete parents
+ w = 1;
+else
+ w = fullm.T(:);
+end
+
+CPD.Wsum = CPD.Wsum + w;
+xi = 1:cpsz;
+yi = (cpsz+1):(cpsz+ss);
+for i=1:dpsz
+ muY = fullm.mu(yi, i);
+ SYY = fullm.Sigma(yi, yi, i);
+ CPD.WYsum(:,i) = CPD.WYsum(:,i) + w(i)*muY;
+ CPD.WYYsum(:,:,i) = CPD.WYYsum(:,:,i) + w(i)*(SYY + muY*muY'); % E[X Y] = Cov[X,Y] + E[X] E[Y]
+ if cpsz > 0
+ muX = fullm.mu(xi, i);
+ SXX = fullm.Sigma(xi, xi, i);
+ SXY = fullm.Sigma(xi, yi, i);
+ CPD.WXsum(:,i) = CPD.WXsum(:,i) + w(i)*muX;
+ CPD.WXXsum(:,:,i) = CPD.WXXsum(:,:,i) + w(i)*(SXX + muX*muX');
+ CPD.WXYsum(:,:,i) = CPD.WXYsum(:,:,i) + w(i)*(SXY + muX*muY');
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_tied_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_tied_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,118 @@
+function CPD = update_tied_ess(CPD, domain, engine, evidence, ns, cnodes)
+
+if ~adjustable_CPD(CPD), return; end
+nCPDs = size(domain, 2);
+fmarginal = cell(1, nCPDs);
+for l=1:nCPDs
+ fmarginal{l} = marginal_family(engine, nodes(l));
+end
+
+[ss cpsz dpsz] = size(CPD.weights);
+if const_evidence_pattern(engine)
+ dom = domain(:,1);
+ dnodes = mysetdiff(1:length(ns), cnodes);
+ ddom = myintersect(dom, dnodes);
+ cdom = myintersect(dom, cnodes);
+ odom = dom(~isemptycell(evidence(dom)));
+ hdom = dom(isemptycell(evidence(dom)));
+ % If all hidden nodes are discrete and all cts nodes are observed
+ % (e.g., HMM with Gaussian output)
+ % we can add the observed evidence in parallel
+ if mysubset(ddom, hdom) & mysubset(cdom, odom)
+ [mu, Sigma, T] = add_cts_ev_to_marginals(fmarginal, evidence, ns, cnodes);
+ else
+ mu = zeros(ss, dpsz, nCPDs);
+ Sigma = zeros(ss, ss, dpsz, nCPDs);
+ T = zeros(dpsz, nCPDs);
+ for l=1:nCPDs
+ [mu(:,:,l), Sigma(:,:,:,l), T(:,l)] = add_ev_to_marginals(fmarginal{l}, evidence, ns, cnodes);
+ end
+ end
+end
+CPD.nsamples = CPD.nsamples + nCPDs;
+
+
+if dpsz == 1 % no discrete parents
+ w = 1;
+else
+ w = fullm.T(:);
+end
+CPD.Wsum = CPD.Wsum + w;
+% Let X be the cts parent (if any), Y be the cts child (self).
+xi = 1:cpsz;
+yi = (cpsz+1):(cpsz+ss);
+for i=1:dpsz
+ muY = fullm.mu(yi, i);
+ SYY = fullm.Sigma(yi, yi, i);
+ CPD.WYsum(:,i) = CPD.WYsum(:,i) + w(i)*muY;
+ CPD.WYYsum(:,:,i) = CPD.WYYsum(:,:,i) + w(i)*(SYY + muY*muY'); % E[X Y] = Cov[X,Y] + E[X] E[Y]
+ if cpsz > 0
+ muX = fullm.mu(xi, i);
+ SXX = fullm.Sigma(xi, xi, i);
+ SXY = fullm.Sigma(xi, yi, i);
+ CPD.WXsum(:,i) = CPD.WXsum(:,i) + w(i)*muX;
+ CPD.WXYsum(:,:,i) = CPD.WXYsum(:,:,i) + w(i)*(SXY + muX*muY');
+ CPD.WXXsum(:,:,i) = CPD.WXXsum(:,:,i) + w(i)*(SXX + muX*muX');
+ end
+end
+
+
+%%%%%%%%%%%%%
+
+function fullm = add_evidence_to_marginal(fmarginal, evidence, ns, cnodes)
+
+
+dom = fmarginal.domain;
+
+% Find out which values of the discrete parents (if any) are compatible with
+% the discrete evidence (if any).
+dnodes = mysetdiff(1:length(ns), cnodes);
+ddom = myintersect(dom, dnodes);
+cdom = myintersect(dom, cnodes);
+odom = dom(~isemptycell(evidence(dom)));
+hdom = dom(isemptycell(evidence(dom)));
+
+dobs = myintersect(ddom, odom);
+dvals = cat(1, evidence{dobs});
+ens = ns; % effective node sizes
+ens(dobs) = 1;
+S = prod(ens(ddom));
+subs = ind2subv(ens(ddom), 1:S);
+mask = find_equiv_posns(dobs, ddom);
+subs(mask) = dvals;
+supportedQs = subv2ind(ns(ddom), subs);
+
+if isempty(ddom)
+ Qarity = 1;
+else
+ Qarity = prod(ns(ddom));
+end
+fullm.T = zeros(Qarity, 1);
+fullm.T(supportedQs) = fmarginal.T(:);
+
+% Now put the hidden cts parts into their right blocks,
+% leaving the observed cts parts as 0.
+cobs = myintersect(cdom, odom);
+chid = myintersect(cdom, hdom);
+cvals = cat(1, evidence{cobs});
+n = sum(ns(cdom));
+fullm.mu = zeros(n,Qarity);
+fullm.Sigma = zeros(n,n,Qarity);
+
+if ~isempty(chid)
+ chid_blocks = block(find_equiv_posns(chid, cdom), ns(cdom));
+end
+if ~isempty(cobs)
+ cobs_blocks = block(find_equiv_posns(cobs, cdom), ns(cdom));
+end
+
+for i=1:length(supportedQs)
+ Q = supportedQs(i);
+ if ~isempty(chid)
+ fullm.mu(chid_blocks, Q) = fmarginal.mu(:, i);
+ fullm.Sigma(chid_blocks, chid_blocks, Q) = fmarginal.Sigma(:,:,i);
+ end
+ if ~isempty(cobs)
+ fullm.mu(cobs_blocks, Q) = cvals(:);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/adjustable_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/adjustable_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function p = adjustable_CPD(CPD)
+% ADJUSTABLE_CPD Does this CPD have any adjustable params? (gaussian)
+% p = adjustable_CPD(CPD)
+
+p = ~CPD.clamped_mean | ~CPD.clamped_cov | ~CPD.clamped_weights;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function T = convert_CPD_to_table_hidden_ps(CPD, self_val)
+% CONVERT_CPD_TO_TABLE_HIDDEN_PS Convert a Gaussian CPD to a table
+% function T = convert_CPD_to_table_hidden_ps(CPD, self_val)
+%
+% self_val must be a non-empty vector.
+% All the parents are hidden.
+%
+% This is used by misc/convert_dbn_CPDs_to_tables
+
+m = CPD.mean;
+C = CPD.cov;
+W = CPD.weights;
+
+[ssz dpsize] = size(m);
+
+T = zeros(dpsize, 1);
+for i=1:dpsize
+ T(i) = gaussian_prob(self_val, m(:,i), C(:,:,i));
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a Gaussian CPD to one or more potentials
+% pot = convert_to_pot(CPD, pot_type, domain, evidence)
+
+sz = CPD.sizes;
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+odom = domain(~isemptycell(evidence(domain)));
+ps = domain(1:end-1);
+cps = ps(CPD.cps);
+dps = ps(CPD.dps);
+self = domain(end);
+cdom = [cps(:)' self];
+ddom = dps;
+cnodes = cdom;
+
+switch pot_type
+ case 'u',
+ error('gaussian utility potentials not yet supported');
+
+ case 'd',
+ T = convert_to_table(CPD, domain, evidence);
+ ns(odom) = 1;
+ pot = dpot(domain, ns(domain), T);
+
+ case {'c','g'},
+ [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence);
+ pot = linear_gaussian_to_cpot(m, C, W, domain, ns, cnodes, evidence);
+
+ case 'cg',
+ [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence);
+ % Convert each conditional Gaussian to a canonical potential
+ cobs = myintersect(cdom, odom);
+ dobs = myintersect(ddom, odom);
+ ens = ns; % effective node size
+ ens(cobs) = 0;
+ ens(dobs) = 1;
+ dpsize = prod(ens(dps));
+ can = cell(1, dpsize);
+ for i=1:dpsize
+ if isempty(W)
+ can{i} = linear_gaussian_to_cpot(m(:,i), C(:,:,i), [], cdom, ns, cnodes, evidence);
+ else
+ can{i} = linear_gaussian_to_cpot(m(:,i), C(:,:,i), W(:,:,i), cdom, ns, cnodes, evidence);
+ end
+ end
+ pot = cgpot(ddom, cdom, ens, can);
+
+ case 'scg',
+ [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence);
+ cobs = myintersect(cdom, odom);
+ dobs = myintersect(ddom, odom);
+ ens = ns; % effective node size
+ ens(cobs) = 0;
+ ens(dobs) = 1;
+ dpsize = prod(ens(dps));
+ cpsize = size(W, 2); % cts parents size
+ ss = size(m, 1); % self size
+ cheaddom = self;
+ ctaildom = cps(:)';
+ pot_array = cell(1, dpsize);
+ for i=1:dpsize
+ pot_array{i} = scgcpot(ss, cpsize, 1, m(:,i), W(:,:,i), C(:,:,i));
+ end
+ pot = scgpot(ddom, cheaddom, ctaildom, ens, pot_array);
+
+ otherwise,
+ error(['unrecognized pot_type' pot_type])
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function T = convert_to_table(CPD, domain, evidence)
+% CONVERT_TO_TABLE Convert a Gaussian CPD to a table
+% T = convert_to_table(CPD, domain, evidence)
+
+
+sz = CPD.sizes;
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+odom = domain(~isemptycell(evidence(domain)));
+ps = domain(1:end-1);
+cps = ps(CPD.cps);
+dps = ps(CPD.dps);
+self = domain(end);
+cdom = [cps(:)' self];
+ddom = dps;
+cnodes = cdom;
+
+[m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence);
+
+
+ns(odom) = 1;
+dpsize = prod(ns(dps));
+self = domain(end);
+assert(myismember(self, odom));
+self_val = evidence{self};
+T = zeros(dpsize, 1);
+if length(cps) > 0
+ assert(~any(isemptycell(evidence(cps))));
+ cps_vals = cat(1, evidence{cps});
+ for i=1:dpsize
+ T(i) = gaussian_prob(self_val, m(:,i) + W(:,:,i)*cps_vals, C(:,:,i));
+ end
+else
+ for i=1:dpsize
+ T(i) = gaussian_prob(self_val, m(:,i), C(:,:,i));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(CPD)
+
+disp('gaussian_CPD object');
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,161 @@
+function CPD = gaussian_CPD(bnet, self, varargin)
+% GAUSSIAN_CPD Make a conditional linear Gaussian distrib.
+%
+% CPD = gaussian_CPD(bnet, node, ...) will create a CPD with random parameters,
+% where node is the number of a node in this equivalence class.
+
+% To define this CPD precisely, call the continuous (cts) parents (if any) X,
+% the discrete parents (if any) Q, and this node Y. Then the distribution on Y is:
+% - no parents: Y ~ N(mu, Sigma)
+% - cts parents : Y|X=x ~ N(mu + W x, Sigma)
+% - discrete parents: Y|Q=i ~ N(mu(i), Sigma(i))
+% - cts and discrete parents: Y|X=x,Q=i ~ N(mu(i) + W(i) x, Sigma(i))
+%
+% The list below gives optional arguments [default value in brackets].
+% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y) and Q = prod(ns(Q)).)
+% Parameters will be reshaped to the right size if necessary.
+%
+% mean - mu(:,i) is the mean given Q=i [ randn(Y,Q) ]
+% cov - Sigma(:,:,i) is the covariance given Q=i [ repmat(100*eye(Y,Y), [1 1 Q]) ]
+% weights - W(:,:,i) is the regression matrix given Q=i [ randn(Y,X,Q) ]
+% cov_type - if 'diag', Sigma(:,:,i) is diagonal [ 'full' ]
+% tied_cov - if 1, we constrain Sigma(:,:,i) to be the same for all i [0]
+% clamp_mean - if 1, we do not adjust mu(:,i) during learning [0]
+% clamp_cov - if 1, we do not adjust Sigma(:,:,i) during learning [0]
+% clamp_weights - if 1, we do not adjust W(:,:,i) during learning [0]
+% cov_prior_weight - weight given to I prior for estimating Sigma [0.01]
+% cov_prior_entropic - if 1, we also use an entropic prior for Sigma [0]
+%
+% e.g., CPD = gaussian_CPD(bnet, i, 'mean', [0; 0], 'clamp_mean', 1)
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ clamp = 0;
+ CPD = class(CPD, 'gaussian_CPD', generic_CPD(clamp));
+ return;
+elseif isa(bnet, 'gaussian_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+CPD = class(CPD, 'gaussian_CPD', generic_CPD(0));
+
+args = varargin;
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+dps = myintersect(ps, bnet.dnodes);
+cps = myintersect(ps, bnet.cnodes);
+fam_sz = ns([ps self]);
+
+CPD.self = self;
+CPD.sizes = fam_sz;
+
+% Figure out which (if any) of the parents are discrete, and which cts, and how big they are
+% dps = discrete parents, cps = cts parents
+CPD.cps = find_equiv_posns(cps, ps); % cts parent index
+CPD.dps = find_equiv_posns(dps, ps);
+ss = fam_sz(end);
+psz = fam_sz(1:end-1);
+dpsz = prod(psz(CPD.dps));
+cpsz = sum(psz(CPD.cps));
+
+% set default params
+CPD.mean = randn(ss, dpsz);
+CPD.cov = 100*repmat(eye(ss), [1 1 dpsz]);
+CPD.weights = randn(ss, cpsz, dpsz);
+CPD.cov_type = 'full';
+CPD.tied_cov = 0;
+CPD.clamped_mean = 0;
+CPD.clamped_cov = 0;
+CPD.clamped_weights = 0;
+CPD.cov_prior_weight = 0.01;
+CPD.cov_prior_entropic = 0;
+nargs = length(args);
+if nargs > 0
+ CPD = set_fields(CPD, args{:});
+end
+
+% Make sure the matrices have 1 dimension per discrete parent.
+% Bug fix due to Xuejing Sun 3/6/01
+CPD.mean = myreshape(CPD.mean, [ss ns(dps)]);
+CPD.cov = myreshape(CPD.cov, [ss ss ns(dps)]);
+CPD.weights = myreshape(CPD.weights, [ss cpsz ns(dps)]);
+
+% Precompute indices into block structured matrices
+% to speed up CPD_to_lambda_msg and CPD_to_pi
+cpsizes = CPD.sizes(CPD.cps);
+CPD.cps_block_ndx = cell(1, length(cps));
+for i=1:length(cps)
+ CPD.cps_block_ndx{i} = block(i, cpsizes);
+end
+
+%%%%%%%%%%%
+% Learning stuff
+
+% expected sufficient statistics
+CPD.Wsum = zeros(dpsz,1);
+CPD.WYsum = zeros(ss, dpsz);
+CPD.WXsum = zeros(cpsz, dpsz);
+CPD.WYYsum = zeros(ss, ss, dpsz);
+CPD.WXXsum = zeros(cpsz, cpsz, dpsz);
+CPD.WXYsum = zeros(cpsz, ss, dpsz);
+
+% For BIC
+CPD.nsamples = 0;
+switch CPD.cov_type
+ case 'full',
+ % since symmetric
+ %ncov_params = ss*(ss-1)/2;
+ ncov_params = ss*(ss+1)/2;
+ case 'diag',
+ ncov_params = ss;
+ otherwise
+ error(['unrecognized cov_type ' cov_type]);
+end
+% params = weights + mean + cov
+if CPD.tied_cov
+ CPD.nparams = ss*cpsz*dpsz + ss*dpsz + ncov_params;
+else
+ CPD.nparams = ss*cpsz*dpsz + ss*dpsz + dpsz*ncov_params;
+end
+
+% for speeding up maximize_params
+CPD.useC = exist('rep_mult');
+
+clamped = CPD.clamped_mean & CPD.clamped_cov & CPD.clamped_weights;
+CPD = set_clamped(CPD, clamped);
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.sizes = [];
+CPD.cps = [];
+CPD.dps = [];
+CPD.mean = [];
+CPD.cov = [];
+CPD.weights = [];
+CPD.clamped_mean = [];
+CPD.clamped_cov = [];
+CPD.clamped_weights = [];
+CPD.cov_type = [];
+CPD.tied_cov = [];
+CPD.Wsum = [];
+CPD.WYsum = [];
+CPD.WXsum = [];
+CPD.WYYsum = [];
+CPD.WXXsum = [];
+CPD.WXYsum = [];
+CPD.nsamples = [];
+CPD.nparams = [];
+CPD.cov_prior_weight = [];
+CPD.cov_prior_entropic = [];
+CPD.useC = [];
+CPD.cps_block_ndx = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence)
+% GAUSSIAN_CPD_PARAMS_GIVEN_EV_ON_DPS Extract parameters given evidence on all discrete parents
+% function [m, C, W] = gaussian_CPD_params_given_ev_on_dps(CPD, domain, evidence)
+
+ps = domain(1:end-1);
+dps = ps(CPD.dps);
+if isempty(dps)
+ m = CPD.mean;
+ C = CPD.cov;
+ W = CPD.weights;
+else
+ odom = domain(~isemptycell(evidence(domain)));
+ dops = myintersect(dps, odom);
+ dpvals = cat(1, evidence{dops});
+ if length(dops) == length(dps)
+ dpsizes = CPD.sizes(CPD.dps);
+ dpval = subv2ind(dpsizes, dpvals(:)');
+ m = CPD.mean(:, dpval);
+ C = CPD.cov(:, :, dpval);
+ W = CPD.weights(:, :, dpval);
+ else
+ map = find_equiv_posns(dops, dps);
+ index = mk_multi_index(length(dps), map, dpvals);
+ m = CPD.mean(:, index{:});
+ C = CPD.cov(:, :, index{:});
+ W = CPD.weights(:, :, index{:});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function val = get_params(CPD, name)
+% GET_PARAMS Get the parameters (fields) for a gaussian_CPD object
+% val = get_params(CPD, name)
+%
+% The following fields can be accessed
+%
+% mean - mu(:,i) is the mean given Q=i
+% cov - Sigma(:,:,i) is the covariance given Q=i
+% weights - W(:,:,i) is the regression matrix given Q=i
+%
+% e.g., mean = get_params(CPD, 'mean')
+
+switch name
+ case 'mean', val = CPD.mean;
+ case 'cov', val = CPD.cov;
+ case 'weights', val = CPD.weights;
+ otherwise,
+ error(['invalid argument name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/learn_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/learn_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function CPD = learn_params(CPD, fam, data, ns, cnodes)
+%function CPD = learn_params(CPD, fam, data, ns, cnodes)
+% LEARN_PARAMS Compute the maximum likelihood estimate of the params of a gaussian CPD given complete data
+% CPD = learn_params(CPD, fam, data, ns, cnodes)
+%
+% data(i,m) is the value of node i in case m (can be cell array).
+% We assume this node has a maximize_params method.
+
+ncases = size(data, 2);
+CPD = reset_ess(CPD);
+% make a fully observed joint distribution over the family
+fmarginal.domain = fam;
+fmarginal.T = 1;
+fmarginal.mu = [];
+fmarginal.Sigma = [];
+if ~iscell(data)
+ cases = num2cell(data);
+else
+ cases = data;
+end
+hidden_bitv = zeros(1, max(fam));
+for m=1:ncases
+ % specify (as a bit vector) which elements in the family domain are hidden
+ hidden_bitv = zeros(1, max(fmarginal.domain));
+ ev = cases(:,m);
+ hidden_bitv(find(isempty(ev)))=1;
+ CPD = update_ess(CPD, fmarginal, ev, ns, cnodes, hidden_bitv);
+end
+CPD = maximize_params(CPD);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/log_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/log_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function L = log_prob_node(CPD, self_ev, pev)
+% LOG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m), theta_i) for node i (gaussian)
+% L = log_prob_node(CPD, self_ev, pev)
+%
+% self_ev(m) is the evidence on this node in case m.
+% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents).
+% (These may also be cell arrays.)
+
+if iscell(self_ev), usecell = 1; else usecell = 0; end
+
+use_log = 1;
+ncases = length(self_ev);
+nparents = length(CPD.sizes)-1;
+assert(ncases == size(pev, 2));
+
+if ncases == 0
+ L = 0;
+ return;
+end
+
+L = 0;
+for m=1:ncases
+ if isempty(CPD.dps)
+ i = 1;
+ else
+ if usecell
+ dpvals = cat(1, pev{CPD.dps, m});
+ else
+ dpvals = pev(CPD.dps, m);
+ end
+ i = subv2ind(CPD.sizes(CPD.dps), dpvals(:)');
+ end
+ if usecell
+ y = self_ev{m};
+ else
+ y = self_ev(m);
+ end
+ if length(CPD.cps) == 0
+ L = L + gaussian_prob(y, CPD.mean(:,i), CPD.cov(:,:,i), use_log);
+ else
+ if usecell
+ x = cat(1, pev{CPD.cps, m});
+ else
+ x = pev(CPD.cps, m);
+ end
+ L = L + gaussian_prob(y, CPD.mean(:,i) + CPD.weights(:,:,i)*x, CPD.cov(:,:,i), use_log);
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian)
+% CPD = maximize_params(CPD, temperature)
+%
+% Temperature is currently ignored.
+
+if ~adjustable_CPD(CPD), return; end
+
+
+if CPD.clamped_mean
+ cl_mean = CPD.mean;
+else
+ cl_mean = [];
+end
+
+if CPD.clamped_cov
+ cl_cov = CPD.cov;
+else
+ cl_cov = [];
+end
+
+if CPD.clamped_weights
+ cl_weights = CPD.weights;
+else
+ cl_weights = [];
+end
+
+[ssz psz Q] = size(CPD.weights);
+
+[ss cpsz dpsz] = size(CPD.weights); % ss = self size = ssz
+if cpsz > CPD.nsamples
+ fprintf('gaussian_CPD/maximize_params: warning: input dimension (%d) > nsamples (%d)\n', ...
+ cpsz, CPD.nsamples);
+end
+
+prior = repmat(CPD.cov_prior_weight*eye(ssz,ssz), [1 1 Q]);
+
+
+[CPD.mean, CPD.cov, CPD.weights] = ...
+ clg_Mstep(CPD.Wsum, CPD.WYsum, CPD.WYYsum, [], CPD.WXsum, CPD.WXXsum, CPD.WXYsum, ...
+ 'cov_type', CPD.cov_type, 'clamped_mean', cl_mean, ...
+ 'clamped_cov', cl_cov, 'clamped_weights', cl_weights, ...
+ 'tied_cov', CPD.tied_cov, ...
+ 'cov_prior', prior);
+
+if 0
+CPD.mean = reshape(CPD.mean, [ss dpsz]);
+CPD.cov = reshape(CPD.cov, [ss ss dpsz]);
+CPD.weights = reshape(CPD.weights, [ss cpsz dpsz]);
+end
+
+% Bug fix 11 May 2003 KPM
+% clg_Mstep collapses all discrete parents into one mega-node
+% but convert_to_CPT needs access to each parent separately
+sz = CPD.sizes;
+ss = sz(end);
+
+% Bug fix KPM 20 May 2003:
+cpsz = sum(sz(CPD.cps));
+%if isempty(CPD.cps)
+% cpsz = 0;
+%else
+% cpsz = sz(CPD.cps);
+%end
+dpsz = sz(CPD.dps);
+CPD.mean = myreshape(CPD.mean, [ss dpsz]);
+CPD.cov = myreshape(CPD.cov, [ss ss dpsz]);
+CPD.weights = myreshape(CPD.weights, [ss cpsz dpsz]);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params_debug.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params_debug.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,189 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian)
+% CPD = maximize_params(CPD, temperature)
+%
+% Temperature is currently ignored.
+
+if ~adjustable_CPD(CPD), return; end
+
+CPD1 = struct(new_maximize_params(CPD));
+CPD2 = struct(old_maximize_params(CPD));
+assert(approxeq(CPD1.mean, CPD2.mean))
+assert(approxeq(CPD1.cov, CPD2.cov))
+assert(approxeq(CPD1.weights, CPD2.weights))
+
+CPD = new_maximize_params(CPD);
+
+%%%%%%%
+function CPD = new_maximize_params(CPD)
+
+if CPD.clamped_mean
+ cl_mean = CPD.mean;
+else
+ cl_mean = [];
+end
+
+if CPD.clamped_cov
+ cl_cov = CPD.cov;
+else
+ cl_cov = [];
+end
+
+if CPD.clamped_weights
+ cl_weights = CPD.weights;
+else
+ cl_weights = [];
+end
+
+[ssz psz Q] = size(CPD.weights);
+
+prior = repmat(CPD.cov_prior_weight*eye(ssz,ssz), [1 1 Q]);
+[CPD.mean, CPD.cov, CPD.weights] = ...
+ Mstep_clg('w', CPD.Wsum, 'YY', CPD.WYYsum, 'Y', CPD.WYsum, 'YTY', [], ...
+ 'XX', CPD.WXXsum, 'XY', CPD.WXYsum, 'X', CPD.WXsum, ...
+ 'cov_type', CPD.cov_type, 'clamped_mean', cl_mean, ...
+ 'clamped_cov', cl_cov, 'clamped_weights', cl_weights, ...
+ 'tied_cov', CPD.tied_cov, ...
+ 'cov_prior', prior);
+
+
+%%%%%%%%%%%
+
+function CPD = old_maximize_params(CPD)
+
+
+if ~adjustable_CPD(CPD), return; end
+
+%assert(approxeq(CPD.nsamples, sum(CPD.Wsum)));
+assert(~any(isnan(CPD.WXXsum)))
+assert(~any(isnan(CPD.WXYsum)))
+assert(~any(isnan(CPD.WYYsum)))
+
+[self_size cpsize dpsize] = size(CPD.weights);
+
+% Append 1s to the parents, and derive the corresponding cross products.
+% This is used when estimate the means and weights simultaneosuly,
+% and when estimatting Sigma.
+% Let x2 = [x 1]'
+XY = zeros(cpsize+1, self_size, dpsize); % XY(:,:,i) = sum_l w(l,i) x2(l) y(l)'
+XX = zeros(cpsize+1, cpsize+1, dpsize); % XX(:,:,i) = sum_l w(l,i) x2(l) x2(l)'
+YY = zeros(self_size, self_size, dpsize); % YY(:,:,i) = sum_l w(l,i) y(l) y(l)'
+for i=1:dpsize
+ XY(:,:,i) = [CPD.WXYsum(:,:,i) % X*Y
+ CPD.WYsum(:,i)']; % 1*Y
+ % [x * [x' 1] = [xx' x
+ % 1] x' 1]
+ XX(:,:,i) = [CPD.WXXsum(:,:,i) CPD.WXsum(:,i);
+ CPD.WXsum(:,i)' CPD.Wsum(i)];
+ YY(:,:,i) = CPD.WYYsum(:,:,i);
+end
+
+w = CPD.Wsum(:);
+% Set any zeros to one before dividing
+% This is valid because w(i)=0 => WYsum(:,i)=0, etc
+w = w + (w==0);
+
+if CPD.clamped_mean
+ % Estimating B2 and then setting the last column (the mean) to the clamped mean is *not* equivalent
+ % to estimating B and then adding the clamped_mean to the last column.
+ if ~CPD.clamped_weights
+ B = zeros(self_size, cpsize, dpsize);
+ for i=1:dpsize
+ if det(CPD.WXXsum(:,:,i))==0
+ B(:,:,i) = 0;
+ else
+ % Eqn 9 in table 2 of TR
+ %B(:,:,i) = CPD.WXYsum(:,:,i)' * inv(CPD.WXXsum(:,:,i));
+ B(:,:,i) = (CPD.WXXsum(:,:,i) \ CPD.WXYsum(:,:,i))';
+ end
+ end
+ %CPD.weights = reshape(B, [self_size cpsize dpsize]);
+ CPD.weights = B;
+ end
+elseif CPD.clamped_weights % KPM 1/25/02
+ if ~CPD.clamped_mean % ML estimate is just sample mean of the residuals
+ for i=1:dpsize
+ CPD.mean(:,i) = (CPD.WYsum(:,i) - CPD.weights(:,:,i) * CPD.WXsum(:,i)) / w(i);
+ end
+ end
+else % nothing is clamped, so estimate mean and weights simultaneously
+ B2 = zeros(self_size, cpsize+1, dpsize);
+ for i=1:dpsize
+ if det(XX(:,:,i))==0 % fix by U. Sondhauss 6/27/99
+ B2(:,:,i)=0;
+ else
+ % Eqn 9 in table 2 of TR
+ %B2(:,:,i) = XY(:,:,i)' * inv(XX(:,:,i));
+ B2(:,:,i) = (XX(:,:,i) \ XY(:,:,i))';
+ end
+ CPD.mean(:,i) = B2(:,cpsize+1,i);
+ CPD.weights(:,:,i) = B2(:,1:cpsize,i);
+ end
+end
+
+% Let B2 = [W mu]
+if cpsize>0
+ B2(:,1:cpsize,:) = reshape(CPD.weights, [self_size cpsize dpsize]);
+end
+B2(:,cpsize+1,:) = reshape(CPD.mean, [self_size dpsize]);
+
+% To avoid singular covariance matrices,
+% we use the regularization method suggested in "A Quasi-Bayesian approach to estimating
+% parameters for mixtures of normal distributions", Hamilton 91.
+% If the ML estimate is Sigma = M/N, the MAP estimate is (M+gamma*I) / (N+gamma),
+% where gamma >=0 is a smoothing parameter (equivalent sample size of I prior)
+
+gamma = CPD.cov_prior_weight;
+
+if ~CPD.clamped_cov
+ if CPD.cov_prior_entropic % eqn 12 of Brand AI/Stat 99
+ Z = 1-temp;
+ % When temp > 1, Z is negative, so we are dividing by a smaller
+ % number, ie. increasing the variance.
+ else
+ Z = 0;
+ end
+ if CPD.tied_cov
+ S = zeros(self_size, self_size);
+ % Eqn 2 from table 2 in TR
+ for i=1:dpsize
+ S = S + (YY(:,:,i) - B2(:,:,i)*XY(:,:,i));
+ end
+ %denom = CPD.nsamples + gamma + Z;
+ denom = CPD.nsamples + Z;
+ S = (S + gamma*eye(self_size)) / denom;
+ if strcmp(CPD.cov_type, 'diag')
+ S = diag(diag(S));
+ end
+ CPD.cov = repmat(S, [1 1 dpsize]);
+ else
+ for i=1:dpsize
+ % Eqn 1 from table 2 in TR
+ S = YY(:,:,i) - B2(:,:,i)*XY(:,:,i);
+ %denom = w(i) + gamma + Z;
+ denom = w(i) + Z;
+ S = (S + gamma*eye(self_size)) / denom;
+ CPD.cov(:,:,i) = S;
+ end
+ if strcmp(CPD.cov_type, 'diag')
+ for i=1:dpsize
+ CPD.cov(:,:,i) = diag(diag(CPD.cov(:,:,i)));
+ end
+ end
+ end
+end
+
+
+check_covars = 0;
+min_covar = 1e-5;
+if check_covars % prevent collapsing to a point
+ for i=1:dpsize
+ if min(svd(CPD.cov(:,:,i))) < min_covar
+ disp(['resetting singular covariance for node ' num2str(CPD.self)]);
+ CPD.cov(:,:,i) = CPD.init_cov(:,:,i);
+ end
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function [mu, Sigma, W] = CPD_to_linear_gaussian(CPD, domain, ns, cnodes, evidence)
+
+ps = domain(1:end-1);
+dnodes = mysetdiff(1:length(ns), cnodes);
+dps = myintersect(ps, dnodes); % discrete parents
+
+if isempty(dps)
+ Q = 1;
+else
+ assert(~any(isemptycell(evidence(dps))));
+ dpvals = cat(1, evidence{dps});
+ Q = subv2ind(ns(dps), dpvals(:)');
+end
+
+mu = CPD.mean(:,Q);
+Sigma = CPD.cov(:,:,Q);
+W = CPD.weights(:,:,Q);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/CPD_to_linear_gaussian.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@gaussian_CPD/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics for a Gaussian CPD.
+% CPD = reset_ess(CPD)
+
+CPD.nsamples = 0;
+CPD.Wsum = zeros(size(CPD.Wsum));
+CPD.WYsum = zeros(size(CPD.WYsum));
+CPD.WYYsum = zeros(size(CPD.WYYsum));
+CPD.WXsum = zeros(size(CPD.WXsum));
+CPD.WXXsum = zeros(size(CPD.WXXsum));
+CPD.WXYsum = zeros(size(CPD.WXYsum));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/sample_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/sample_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function y = sample_node(CPD, pev)
+% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (gaussian)
+% y = sample_node(CPD, parent_evidence)
+%
+% pev{i} is the value of the i'th parent (if there are any parents)
+% y is the sampled value (a scalar or vector)
+
+if length(CPD.dps)==0
+ i = 1;
+else
+ dpvals = cat(1, pev{CPD.dps});
+ i = subv2ind(CPD.sizes(CPD.dps), dpvals(:)');
+end
+
+if length(CPD.cps) == 0
+ y = gsamp(CPD.mean(:,i), CPD.cov(:,:,i), 1);
+else
+ pev = pev(:);
+ x = cat(1, pev{CPD.cps});
+ y = gsamp(CPD.mean(:,i) + CPD.weights(:,:,i)*x(:), CPD.cov(:,:,i), 1);
+end
+y = y(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function CPD = set_fields(CPD, varargin)
+% SET_PARAMS Set the parameters (fields) for a gaussian_CPD object
+% CPD = set_params(CPD, name/value pairs)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+%
+% mean - mu(:,i) is the mean given Q=i
+% cov - Sigma(:,:,i) is the covariance given Q=i
+% weights - W(:,:,i) is the regression matrix given Q=i
+% cov_type - if 'diag', Sigma(:,:,i) is diagonal
+% tied_cov - if 1, we constrain Sigma(:,:,i) to be the same for all i
+% clamp_mean - if 1, we do not adjust mu(:,i) during learning
+% clamp_cov - if 1, we do not adjust Sigma(:,:,i) during learning
+% clamp_weights - if 1, we do not adjust W(:,:,i) during learning
+% clamp - if 1, we do not adjust any params
+% cov_prior_weight - weight given to I prior for estimating Sigma
+% cov_prior_entropic - if 1, we also use an entropic prior for Sigma [0]
+%
+% e.g., CPD = set_params(CPD, 'mean', [0;0])
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'mean', CPD.mean = args{i+1};
+ case 'cov', CPD.cov = args{i+1};
+ case 'weights', CPD.weights = args{i+1};
+ case 'cov_type', CPD.cov_type = args{i+1};
+ %case 'tied_cov', CPD.tied_cov = strcmp(args{i+1}, 'yes');
+ case 'tied_cov', CPD.tied_cov = args{i+1};
+ case 'clamp_mean', CPD.clamped_mean = args{i+1};
+ case 'clamp_cov', CPD.clamped_cov = args{i+1};
+ case 'clamp_weights', CPD.clamped_weights = args{i+1};
+ case 'clamp', clamp = args{i+1};
+ CPD.clamped_mean = clamp;
+ CPD.clamped_cov = clamp;
+ CPD.clamped_weights = clamp;
+ case 'cov_prior_weight', CPD.cov_prior_weight = args{i+1};
+ case 'cov_prior_entropic', CPD.cov_prior_entropic = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a Gaussian node
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+
+%if nargin < 6
+% hidden_bitv = zeros(1, max(fmarginal.domain));
+% hidden_bitv(find(isempty(evidence)))=1;
+%end
+
+dom = fmarginal.domain;
+self = dom(end);
+ps = dom(1:end-1);
+cps = myintersect(ps, cnodes);
+dps = mysetdiff(ps, cps);
+
+CPD.nsamples = CPD.nsamples + 1;
+[ss cpsz dpsz] = size(CPD.weights); % ss = self size
+[ss dpsz] = size(CPD.mean);
+
+% Let X be the cts parent (if any), Y be the cts child (self).
+
+if ~hidden_bitv(self) & ~any(hidden_bitv(cps)) & all(hidden_bitv(dps))
+ % Speedup for the common case that all cts nodes are observed, all discrete nodes are hidden
+ % Since X and Y are observed, SYY = 0, SXX = 0, SXY = 0
+ % Since discrete parents are hidden, we do not need to add evidence to w.
+ w = fmarginal.T(:);
+ CPD.Wsum = CPD.Wsum + w;
+ y = evidence{self};
+ Cyy = y*y';
+ if ~CPD.useC
+ WY = repmat(w(:)',ss,1); % WY(y,i) = w(i)
+ WYY = repmat(reshape(WY, [ss 1 dpsz]), [1 ss 1]); % WYY(y,y',i) = w(i)
+ %CPD.WYsum = CPD.WYsum + WY .* repmat(y(:), 1, dpsz);
+ CPD.WYsum = CPD.WYsum + y(:) * w(:)';
+ CPD.WYYsum = CPD.WYYsum + WYY .* repmat(reshape(Cyy, [ss ss 1]), [1 1 dpsz]);
+ else
+ W = w(:)';
+ W2 = reshape(W, [1 1 dpsz]);
+ CPD.WYsum = CPD.WYsum + rep_mult(W, y(:), size(CPD.WYsum));
+ CPD.WYYsum = CPD.WYYsum + rep_mult(W2, Cyy, size(CPD.WYYsum));
+ end
+ if cpsz > 0 % X exists
+ x = cat(1, evidence{cps}); x = x(:);
+ Cxx = x*x';
+ Cxy = x*y';
+ WX = repmat(w(:)',cpsz,1); % WX(x,i) = w(i)
+ WXX = repmat(reshape(WX, [cpsz 1 dpsz]), [1 cpsz 1]); % WXX(x,x',i) = w(i)
+ WXY = repmat(reshape(WX, [cpsz 1 dpsz]), [1 ss 1]); % WXY(x,y,i) = w(i)
+ if ~CPD.useC
+ CPD.WXsum = CPD.WXsum + WX .* repmat(x(:), 1, dpsz);
+ CPD.WXXsum = CPD.WXXsum + WXX .* repmat(reshape(Cxx, [cpsz cpsz 1]), [1 1 dpsz]);
+ CPD.WXYsum = CPD.WXYsum + WXY .* repmat(reshape(Cxy, [cpsz ss 1]), [1 1 dpsz]);
+ else
+ CPD.WXsum = CPD.WXsum + rep_mult(W, x(:), size(CPD.WXsum));
+ CPD.WXXsum = CPD.WXXsum + rep_mult(W2, Cxx, size(CPD.WXXsum));
+ CPD.WXYsum = CPD.WXYsum + rep_mult(W2, Cxy, size(CPD.WXYsum));
+ end
+ end
+ return;
+end
+
+% general (non-vectorized) case
+fullm = add_evidence_to_gmarginal(fmarginal, evidence, ns, cnodes); % slow!
+
+if dpsz == 1 % no discrete parents
+ w = 1;
+else
+ w = fullm.T(:);
+end
+
+CPD.Wsum = CPD.Wsum + w;
+xi = 1:cpsz;
+yi = (cpsz+1):(cpsz+ss);
+for i=1:dpsz
+ muY = fullm.mu(yi, i);
+ SYY = fullm.Sigma(yi, yi, i);
+ CPD.WYsum(:,i) = CPD.WYsum(:,i) + w(i)*muY;
+ CPD.WYYsum(:,:,i) = CPD.WYYsum(:,:,i) + w(i)*(SYY + muY*muY'); % E[X Y] = Cov[X,Y] + E[X] E[Y]
+ if cpsz > 0
+ muX = fullm.mu(xi, i);
+ SXX = fullm.Sigma(xi, xi, i);
+ SXY = fullm.Sigma(xi, yi, i);
+ CPD.WXsum(:,i) = CPD.WXsum(:,i) + w(i)*muX;
+ CPD.WXXsum(:,:,i) = CPD.WXXsum(:,:,i) + w(i)*(SXX + muX*muX');
+ CPD.WXYsum(:,:,i) = CPD.WXYsum(:,:,i) + w(i)*(SXY + muX*muY');
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/README/1.1.1.1/Wed May 29 15:59:52 2002//
+/adjustable_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/generic_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/learn_params.m/1.1.1.1/Thu Jun 10 01:53:20 2004//
+/log_prior.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/set_clamped.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@generic_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/BIC_score_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/BIC_score_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function score = BIC_score_CPD(CPD, fam, data, ns, cnodes)
+% BIC_score_CPD Compute the BIC score of a generic CPD
+% score = BIC_score_CPD(CPD, fam, data, ns, cnodes)
+%
+% We assume this node has a maximize_params method
+
+ncases = size(data, 2);
+CPD = reset_ess(CPD);
+% make a fully observed joint distribution over the family
+fmarginal.domain = fam;
+fmarginal.T = 1;
+fmarginal.mu = [];
+fmarginal.Sigma = [];
+if ~iscell(data)
+ cases = num2cell(data);
+else
+ cases = data;
+end
+for m=1:ncases
+ CPD = update_ess(CPD, fmarginal, cases(:,m), ns, cnodes);
+end
+CPD = maximize_params(CPD);
+self = fam(end);
+ps = fam(1:end-1);
+L = log_prob_node(CPD, cases(self,:), cases(ps,:));
+score = L - 0.5*CPD.nparams*log(ncases);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CPD_to_dpots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CPD_to_dpots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function pots = CPD_to_dpots(CPD, domain, ns, cnodes, evidence)
+% CPD_TO_DPOTS Convert the CPD to several discrete potentials, for different instantiations (generic)
+% pots = CPD_to_dpots(CPD, domain, ns, cnodes, evidence)
+%
+% domain(:,i) is the domain of the i'th instantiation of CPD.
+% node_sizes(i) is the size of node i.
+% cnodes = all the cts nodes
+% evidence{i} is the evidence on the i'th node.
+%
+% This just calls CPD_to_dpot for each domain.
+
+nCPDs = size(domain,2);
+pots = cell(1,nCPDs);
+for i=1:nCPDs
+ pots{i} = CPD_to_dpot(CPD, domain(:,i), ns, cnodes, evidence);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/BIC_score_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/CPD_to_dpots.m/1.1.1.1/Wed May 29 15:59:52 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@generic_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+A generic CPD implements general purpose functions like 'display',
+that subtypes can inherit.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/adjustable_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/adjustable_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function p = adjustable_CPD(CPD)
+% ADJUSTABLE_CPD Does this CPD have any adjustable params? (generic)
+% p = adjustable_CPD(CPD)
+
+p = ~CPD.clamped;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function display(CPD)
+
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/generic_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/generic_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function CPD = generic_CPD(clamped)
+% GENERIC_CPD Virtual constructor for generic CPD
+% CPD = generic_CPD(clamped)
+
+if nargin < 1, clamped = 0; end
+
+CPD.clamped = clamped;
+CPD = class(CPD, 'generic_CPD');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/learn_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/learn_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function CPD = learn_params(CPD, fam, data, ns, cnodes)
+% LEARN_PARAMS Compute the maximum likelihood estimate of the params of a generic CPD given complete data
+% CPD = learn_params(CPD, fam, data, ns, cnodes)
+%
+% data(i,m) is the value of node i in case m (can be cell array).
+% We assume this node has a maximize_params method.
+
+%error('no longer supported') % KPM 1 Feb 03
+
+if 1
+ncases = size(data, 2);
+CPD = reset_ess(CPD);
+% make a fully observed joint distribution over the family
+fmarginal.domain = fam;
+fmarginal.T = 1;
+fmarginal.mu = [];
+fmarginal.Sigma = [];
+if ~iscell(data)
+  cases = num2cell(data);
+else
+  cases = data;
+end
+hidden_bitv = zeros(1, max(fam));
+for m=1:ncases
+  % specify (as a bit vector) which elements in the family domain are hidden
+  hidden_bitv = zeros(1, max(fmarginal.domain));
+  ev = cases(:,m);
+  hidden_bitv(fam) = cellfun('isempty', ev(fam)); % BUG FIX: was isempty(evidence) - 'evidence' is undefined here, and isempty on the whole cell array is a scalar test
+  CPD = update_ess(CPD, fmarginal, ev, ns, cnodes, hidden_bitv);
+end
+CPD = maximize_params(CPD);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/log_prior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/log_prior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function L = log_prior(CPD)
+% LOG_PRIOR Return log P(theta) for a generic CPD - we return 0
+% L = log_prior(CPD)
+
+L = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/set_clamped.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/set_clamped.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function CPD = set_clamped(CPD, bit)
+
+CPD.clamped = bit;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_lambda_msg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_lambda_msg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
+% CPD_TO_LAMBDA_MSG Compute lambda message (gmux)
+% lam_msg = compute_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
+% Pearl p183 eq 4.52
+
+% Let Y be this node, X1..Xn be the cts parents and M the discrete switch node.
+% e.g., for n=3, M=1
+%
+% X1 X2 X3 M
+% \
+% \
+% Y
+%
+% So the only case in which we send an informative message is if p=1=M.
+% To the other cts parents, we send the "know nothing" message.
+
+switch msg_type
+ case 'd',
+ error('gaussian_CPD can''t create discrete msgs')
+ case 'g',
+ cps = ps(CPD.cps);
+ cpsizes = CPD.sizes(CPD.cps);
+ self_size = CPD.sizes(end);
+ i = find_equiv_posns(p, cps); % p is n's i'th cts parent
+ psz = cpsizes(i);
+ dps = ps(CPD.dps);
+ M = evidence{dps};
+ if isempty(M)
+ error('gmux node must have observed discrete parent')
+ end
+ P = msg{n}.lambda.precision;
+ if all(P == 0) | (cps(M) ~= p) % if we know nothing, or are sending to a disconnected parent
+ lam_msg.precision = zeros(psz, psz);
+ lam_msg.info_state = zeros(psz, 1);
+ return;
+ end
+ % We are sending a message to the only effectively connected parent.
+ % There are no other incoming pi messages.
+ Bmu = CPD.mean(:,M);
+ BSigma = CPD.cov(:,:,M);
+ Bi = CPD.weights(:,:,M);
+ if (det(P) > 0) | isinf(P)
+ if isinf(P) % Y is observed
+ Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance
+ mu_lambda = msg{n}.lambda.mu; % observed_value;
+ else
+ Sigma_lambda = inv(P);
+ mu_lambda = Sigma_lambda * msg{n}.lambda.info_state;
+ end
+ C = inv(Sigma_lambda + BSigma);
+ lam_msg.precision = Bi' * C * Bi;
+ lam_msg.info_state = Bi' * C * (mu_lambda - Bmu);
+ else
+ % method that uses matrix inversion lemma
+ A = inv(P + inv(BSigma));
+ C = P - P*A*P;
+ lam_msg.precision = Bi' * C * Bi;
+ D = eye(self_size) - P*A;
+ z = msg{n}.lambda.info_state;
+ lam_msg.info_state = Bi' * (D*z - D*P*Bmu);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_pi.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_pi.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+% CPD_TO_PI Compute the pi vector (gmux)
+% function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+
+switch msg_type
+ case 'd',
+ error('gaussian_CPD can''t create discrete msgs')
+ case 'g',
+ dps = ps(CPD.dps);
+ k = evidence{dps};
+ if isempty(k)
+ error('gmux node must have observed discrete parent')
+ end
+ m = msg{n}.pi_from_parent{k};
+ B = CPD.weights(:,:,k);
+ pi.mu = CPD.mean(:,k) + B * m.mu;
+ pi.Sigma = CPD.cov(:,:,k) + B * m.Sigma * B';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:52 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/gmux_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@gmux_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/gmux_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@gmux_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/gmux_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/gmux_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,92 @@
+function CPD = gmux_CPD(bnet, self, varargin)
+% GMUX_CPD Make a Gaussian multiplexer node
+%
+% CPD = gmux_CPD(bnet, node, ...) is used similarly to gaussian_CPD,
+% except we assume there is exactly one discrete parent (call it M)
+% which is used to select which cts parent to pass through to the output.
+% i.e., we define P(Y=y|M=m, X1, ..., XK) = N(y | W*x(m) + mu, Sigma)
+% where Y represents this node, and the Xi's are the cts parents.
+% All the Xi must have the same size, and the num values for M must be K.
+%
+% Currently the params for this kind of CPD cannot be learned.
+%
+% Optional arguments [ default in brackets ]
+%
+% mean - mu [zeros(Y,1)]
+% cov - Sigma [eye(Y,Y)]
+% weights - W [ randn(Y,X) ]
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ clamp = 0;
+ CPD = class(CPD, 'gmux_CPD', generic_CPD(clamp));
+ return;
+elseif isa(bnet, 'gmux_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+CPD = class(CPD, 'gmux_CPD', generic_CPD(1));
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+dps = myintersect(ps, bnet.dnodes);
+cps = myintersect(ps, bnet.cnodes);
+fam_sz = ns([ps self]);
+
+CPD.self = self;
+CPD.sizes = fam_sz;
+
+% Figure out which (if any) of the parents are discrete, and which cts, and how big they are
+% dps = discrete parents, cps = cts parents
+CPD.cps = find_equiv_posns(cps, ps); % cts parent index
+CPD.dps = find_equiv_posns(dps, ps);
+if length(CPD.dps) ~= 1
+ error('gmux must have exactly 1 discrete parent')
+end
+ss = fam_sz(end);
+cpsz = fam_sz(CPD.cps(1)); % in gaussian_CPD, cpsz = sum(fam_sz(CPD.cps))
+if ~all(fam_sz(CPD.cps) == cpsz)
+ error('all cts parents must have same size')
+end
+dpsz = fam_sz(CPD.dps);
+if dpsz ~= length(cps)
+ error(['the arity of the mux node is ' num2str(dpsz) ...
+ ' but there are ' num2str(length(cps)) ' cts parents']);
+end
+
+% set default params
+CPD.mean = zeros(ss, 1);
+CPD.cov = eye(ss);
+CPD.weights = randn(ss, cpsz);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'mean', CPD.mean = args{i+1};
+ case 'cov', CPD.cov = args{i+1};
+ case 'weights', CPD.weights = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.sizes = [];
+CPD.cps = [];
+CPD.dps = [];
+CPD.mean = [];
+CPD.cov = [];
+CPD.weights = [];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a gmux CPD to a Gaussian potential
+% pot = convert_to_pot(CPD, pot_type, domain, evidence)
+
+switch pot_type
+ case {'d', 'u', 'cg', 'scg'},
+ error(['can''t convert gmux to potential of type ' pot_type])
+
+ case {'c','g'},
+ % We create a large weight matrix with zeros in all blocks corresponding
+ % to the non-chosen parents, since they are effectively disconnected.
+ % The chosen parent is determined by the value, m, of the discrete parent.
+ % Thus the potential is as large as the whole family.
+ ps = domain(1:end-1);
+ dps = ps(CPD.dps); % CPD.dps is an index, not a node number (because of param tying)
+ cps = ps(CPD.cps);
+ m = evidence{dps};
+ if isempty(m)
+ error('gmux node must have observed discrete parent')
+ end
+ bs = CPD.sizes(CPD.cps);
+ b = block(m, bs);
+ sum_cpsz = sum(CPD.sizes(CPD.cps));
+ selfsz = CPD.sizes(end);
+ W = zeros(selfsz, sum_cpsz);
+ W(:,b) = CPD.weights(:,:,m);
+
+ ns = zeros(1, max(domain));
+ ns(domain) = CPD.sizes;
+ self = domain(end);
+ cdom = [cps(:)' self];
+ pot = linear_gaussian_to_cpot(CPD.mean(:,m), CPD.cov(:,:,m), W, domain, ns, cdom, evidence);
+
+ otherwise,
+ error(['unrecognized pot_type' pot_type])
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(CPD)
+
+disp('gmux_CPD object');
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/gmux_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/gmux_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,95 @@
+function CPD = gmux_CPD(bnet, self, varargin)
+% GMUX_CPD Make a Gaussian multiplexer node
+%
+% CPD = gmux_CPD(bnet, node, ...) is used similarly to gaussian_CPD,
+% except we assume there is exactly one discrete parent (call it M)
+% which is used to select which cts parent to pass through to the output.
+% i.e., we define P(Y=y|M=m, X1, ..., XK) = N(y | W(m)*x(m) + mu(m), Sigma(m))
+% where Y represents this node, and the Xi's are the cts parents.
+% All the Xi must have the same size, and the num values for M must be K.
+%
+% Currently the params for this kind of CPD cannot be learned.
+%
+% Optional arguments [ default in brackets ]
+%
+% mean - mu(:,i) is the mean given M=i [ zeros(Y,K) ]
+% cov - Sigma(:,:,i) is the covariance given M=i [ repmat(1*eye(Y,Y), [1 1 K]) ]
+% weights - W(:,:,i) is the regression matrix given M=i [ randn(Y,X,K) ]
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ clamp = 0;
+ CPD = class(CPD, 'gmux_CPD', generic_CPD(clamp));
+ return;
+elseif isa(bnet, 'gmux_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+CPD = class(CPD, 'gmux_CPD', generic_CPD(1));
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+dps = myintersect(ps, bnet.dnodes);
+cps = myintersect(ps, bnet.cnodes);
+fam_sz = ns([ps self]);
+
+CPD.self = self;
+CPD.sizes = fam_sz;
+
+% Figure out which (if any) of the parents are discrete, and which cts, and how big they are
+% dps = discrete parents, cps = cts parents
+CPD.cps = find_equiv_posns(cps, ps); % cts parent index
+CPD.dps = find_equiv_posns(dps, ps);
+if length(CPD.dps) ~= 1
+ error('gmux must have exactly 1 discrete parent')
+end
+ss = fam_sz(end);
+cpsz = fam_sz(CPD.cps(1)); % in gaussian_CPD, cpsz = sum(fam_sz(CPD.cps))
+if ~all(fam_sz(CPD.cps) == cpsz)
+ error('all cts parents must have same size')
+end
+dpsz = fam_sz(CPD.dps);
+if dpsz ~= length(cps)
+ error(['the arity of the mux node is ' num2str(dpsz) ...
+ ' but there are ' num2str(length(cps)) ' cts parents']);
+end
+
+% set default params
+%CPD.mean = zeros(ss, 1);
+%CPD.cov = eye(ss);
+%CPD.weights = randn(ss, cpsz);
+CPD.mean = zeros(ss, dpsz);
+CPD.cov = 1*repmat(eye(ss), [1 1 dpsz]);
+CPD.weights = randn(ss, cpsz, dpsz);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'mean', CPD.mean = args{i+1};
+ case 'cov', CPD.cov = args{i+1};
+ case 'weights', CPD.weights = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.sizes = [];
+CPD.cps = [];
+CPD.dps = [];
+CPD.mean = [];
+CPD.cov = [];
+CPD.weights = [];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/sample_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/sample_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function y = sample_node(CPD, pev)
+% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (gmux)
+% y = sample_node(CPD, parent_evidence)
+%
+% parent_ev{i} is the value of the i'th parent
+
+dpval = pev{CPD.dps};
+x = pev{CPD.cps(dpval)};
+y = gsamp(CPD.mean(:,dpval) + CPD.weights(:,:,dpval)*x(:), CPD.cov(:,:,dpval), 1);
+y = y(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function CPT = CPD_to_CPT(CPD)
+% Compute the big CPT for an HHMM Q node (including F parents)
+% by combining internal transprob and startprob
+% function CPT = CPD_to_CPT(CPD)
+
+Qsz = CPD.Qsz;
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ error('not implemented')
+ else % no F from self, hence no startprob (top level)
+ nps = length(CPD.dom_sz)-1; % num parents
+ CPT = 0*myones(CPD.dom_sz);
+ % when Fbelow=1, the CPT(i,j) = delta(i,j) for all k
+ for k=1:prod(CPD.Qpsizes)
+ Qps_vals = ind2subv(CPD.Qpsizes, k);
+ ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Qps_ndx], [1 Qps_vals]);
+ CPT(ndx{:}) = eye(Qsz); % CPT(:,2,k,:) or CPT(:,k,2,:) etc
+ end
+ ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 2);
+ CPT(ndx{:}) = CPD.transprob; % we assume transprob is in topo order
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx) % bottom level
+ nps = length(CPD.dom_sz)-1; % num parents
+ CPT = 0*myones(CPD.dom_sz);
+ ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 1);
+ CPT(ndx{:}) = CPD.transprob;
+ ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 2);
+ CPT(ndx{:}) = CPD.startprob;
+ else % no F from self
+ error('An hhmmQ node without any F parents is just a tabular_CPD')
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/CPD_to_CPT.m/1.1.1.1/Tue Sep 24 12:46:46 2002//
+/hhmm2Q_CPD.m/1.1.1.1/Tue Sep 24 22:34:40 2002//
+/maximize_params.m/1.1.1.1/Tue Sep 24 22:44:36 2002//
+/reset_ess.m/1.1.1.1/Tue Sep 24 22:36:16 2002//
+/update_ess.m/1.1.1.1/Tue Sep 24 22:43:30 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@hhmm2Q_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/hhmm2Q_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/hhmm2Q_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function CPD = hhmm2Q_CPD(bnet, self, varargin)
+% HHMM2Q_CPD Make the CPD for a Q node in a 2 level hierarchical HMM
+% CPD = hhmm2Q_CPD(bnet, self, ...)
+%
+% Fself(t-1) Qps
+% \ |
+% \ v
+% Qold(t-1) -> Q(t)
+% /
+% /
+% Fbelow(t-1)
+%
+%
+% optional args [defaults]
+%
+% Fself - node number <= ss
+% Fbelow - node number <= ss
+% Qps - node numbers (all <= 2*ss) - uses 2TBN indexing
+% transprob - CPT for when Fbelow=2 and Fself=1
+% startprob - CPT for when Fbelow=2 and Fself=2
+% If Fbelow=1, we cannot change state.
+
+ss = bnet.nnodes_per_slice;
+ns = bnet.node_sizes(:);
+
+% set default arguments
+Fself = [];
+Fbelow = [];
+Qps = [];
+startprob = [];
+transprob = [];
+
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'Fself', Fself = varargin{i+1};
+ case 'Fbelow', Fbelow = varargin{i+1};
+ case 'Qps', Qps = varargin{i+1};
+ case 'transprob', transprob = varargin{i+1};
+ case 'startprob', startprob = varargin{i+1};
+ end
+end
+
+ps = parents(bnet.dag, self);
+old_self = self-ss;
+ndsz = ns(:)';
+CPD.dom_sz = [ndsz(ps) ns(self)];
+CPD.Fself_ndx = find_equiv_posns(Fself, ps);
+CPD.Fbelow_ndx = find_equiv_posns(Fbelow, ps);
+Qps = mysetdiff(ps, [Fself Fbelow old_self]);
+CPD.Qps_ndx = find_equiv_posns(Qps, ps);
+CPD.old_self_ndx = find_equiv_posns(old_self, ps);
+
+Qps = ps(CPD.Qps_ndx);
+CPD.Qsz = ns(self);
+CPD.Qpsizes = ns(Qps);
+
+CPD.transprob = transprob;
+CPD.startprob = startprob;
+CPD.start_counts = [];
+CPD.trans_counts = [];
+
+CPD = class(CPD, 'hhmm2Q_CPD', discrete_CPD(0, CPD.dom_sz));
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a hhmm2Q node to their ML/MAP values.
+% CPD = maximize_params(CPD, temperature)
+
+if sum(CPD.start_counts(:)) > 0
+ CPD.startprob = mk_stochastic(CPD.start_counts);
+end
+if sum(CPD.trans_counts(:)) > 0
+ CPD.transprob = mk_stochastic(CPD.trans_counts);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics of a hhmm2 Q node.
+% CPD = reset_ess(CPD)
+
+domsz = CPD.dom_sz;
+domsz(CPD.Fself_ndx) = 1;
+domsz(CPD.Fbelow_ndx) = 1;
+Qdom_sz = domsz;
+Qdom_sz(Qdom_sz==1)=[]; % get rid of dimensions of size 1
+
+CPD.start_counts = zeros(Qdom_sz);
+CPD.trans_counts = zeros(Qdom_sz);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+
+marg = add_ev_to_dmarginal(fmarginal, evidence, ns);
+
+nps = length(CPD.dom_sz)-1; % num parents
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Fself_ndx], [2 1]);
+ CPD.trans_counts = CPD.trans_counts + squeeze(marg.T(ndx{:}));
+ ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Fself_ndx], [2 2]);
+ CPD.start_counts = CPD.start_counts + squeeze(marg.T(ndx{:}));
+ else % no F from self, hence no startprob (top level)
+ ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 2);
+ CPD.trans_counts = CPD.trans_counts + squeeze(marg.T(ndx{:}));
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx) % self F (bottom level)
+ ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 1);
+ CPD.trans_counts = CPD.trans_counts + squeeze(marg.T(ndx{:}));
+ ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 2);
+ CPD.start_counts = CPD.start_counts + squeeze(marg.T(ndx{:}));
+ else % no F from self or below
+ error('no F signal')
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/hhmmF_CPD.m/1.1.1.1/Mon Jun 24 23:38:24 2002//
+/log_prior.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/maximize_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/reset_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/update_CPT.m/1.1.1.1/Mon Jun 24 22:45:04 2002//
+/update_ess.m/1.1.1.1/Mon Jun 24 23:54:30 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@hhmmF_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/hhmmF_CPD.m/1.1.1.1/Mon Jun 24 22:35:06 2002//
+/log_prior.m/1.1.1.1/Mon Jun 24 22:35:06 2002//
+/maximize_params.m/1.1.1.1/Mon Jun 24 22:35:06 2002//
+/reset_ess.m/1.1.1.1/Mon Jun 24 22:35:06 2002//
+/update_CPT.m/1.1.1.1/Mon Jun 24 22:35:06 2002//
+/update_ess.m/1.1.1.1/Mon Jun 24 22:35:06 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@hhmmF_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/hhmmF_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/hhmmF_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+function CPD = hhmmF_CPD(bnet, self, Qnodes, d, D, varargin)
+% HHMMF_CPD Make the CPD for an F node at depth D of a D-level hierarchical HMM
+% CPD = hhmmF_CPD(bnet, self, Qnodes, d, D, ...)
+%
+% Q(d-1)
+% \
+% \
+% F(d)
+% / |
+% / |
+% Q(d) F(d+1)
+%
+% We assume nodes are ordered (numbered) as follows:
+% Q(1), ... Q(d), F(d+1), F(d)
+%
+% F(d)=2 means level d has finished. The prob this happens depends on Q(d)
+% and optionally on Q(d-1), Q(d=1), ..., Q(1).
+% Also, level d can only finish if the level below has finished
+% (hence the F(d+1) -> F(d) arc).
+%
+% If d=D, there is no F(d+1), so F(d) is just a regular tabular_CPD.
+% If all models always finish in the same state (e.g., their last),
+% we don't need to condition on the state of parent models (Q(d-1), ...)
+%
+% optional args [defaults]
+%
+% termprob - termprob(k,i,2) = prob finishing given Q(d)=i and Q(1:d-1)=k [ finish in last state ]
+%
+% hhmmF_CPD is a subclass of tabular_CPD so we inherit inference methods like CPD_to_pot, etc.
+%
+% We create an isolated tabular_CPD with no F parent to learn termprob
+% so we can avail of e.g., entropic or Dirichlet priors.
+%
+% For details, see "Linear-time inference in hierarchical HMMs", Murphy and Paskin, NIPS'01.
+
+
+ps = parents(bnet.dag, self);
+Qps = myintersect(ps, Qnodes);
+F = mysetdiff(ps, Qps);
+CPD.Q = Qps(end); % Q(d)
+assert(CPD.Q == Qnodes(d));
+CPD.Qps = Qps(1:end-1); % all Q parents except Q(d), i.e., calling context
+
+ns = bnet.node_sizes(:);
+CPD.Qsizes = ns(Qnodes);
+CPD.d = d;
+CPD.D = D;
+
+Qsz = ns(CPD.Q);
+Qpsz = prod(ns(CPD.Qps));
+
+% set default arguments
+p = 0.9;
+%termprob(k,i,t) Might terminate if i=Qsz; will not terminate if i<Qsz.
+
+obs_self = ~hidden_bitv(Q);
+if obs_self
+ self_val = evidence{Q};
+end
+
+if isempty(Qps) % independent of parent context
+ counts = zeros(Qsz, 2);
+ %fmarginal.T(Q(d), F(d+1), F(d))
+ if obs_self
+ marg = myreshape(fmarginal.T, [1 2 2]);
+ counts(self_val,:) = marg(1,2,:);
+ %counts(self_val,:) = marg(1,1,:) + marg(1,2,:);
+ else
+ marg = myreshape(fmarginal.T, [Qsz 2 2]);
+ counts = squeeze(marg(:,2,:));
+ %counts = squeeze(marg(:,2,:)) + squeeze(marg(:,1,:));
+ end
+else
+ counts = zeros(Qpsz, Qsz, 2);
+ %fmarginal.T(Q(1:d-1), Q(d), F(d+1), F(d))
+ obs_Qps = ~any(hidden_bitv(Qps)); % we assume that all or none of the Q parents are observed
+ if obs_Qps
+ Qps_val = subv2ind(Qpsz, cat(1, evidence{Qps}));
+ end
+ if obs_self & obs_Qps
+ marg = myreshape(fmarginal.T, [1 1 2 2]);
+ counts(Qps_val, self_val, :) = squeeze(marg(1,1,2,:));
+ %counts(Qps_val, self_val, :) = squeeze(marg(1,1,2,:)) + squeeze(marg(1,1,1,:));
+ elseif ~obs_self & obs_Qps
+ marg = myreshape(fmarginal.T, [1 Qsz 2 2]);
+ counts(Qps_val, :, :) = squeeze(marg(1,:,2,:));
+ %counts(Qps_val, :, :) = squeeze(marg(1,:,2,:)) + squeeze(marg(1,:,1,:));
+ elseif obs_self & ~obs_Qps
+ error('not yet implemented')
+ else
+ marg = myreshape(fmarginal.T, [Qpsz Qsz 2 2]);
+ counts(:, :, :) = squeeze(marg(:,:,2,:));
+ %counts(:, :, :) = squeeze(marg(:,:,2,:)) + squeeze(marg(:,:,1,:));
+ end
+end
+
+CPD.sub_CPD_term = update_ess_simple(CPD.sub_CPD_term, counts);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/hhmmF_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/hhmmF_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,73 @@
+function CPD = hhmmF_CPD(bnet, self, Qself, Fbelow, varargin)
+% HHMMF_CPD Make the CPD for an F node in a hierarchical HMM
+% CPD = hhmmF_CPD(bnet, self, Qself, Fbelow, ...)
+%
+% Qps
+% \
+% \
+% Fself
+% / |
+% / |
+% Qself Fbelow
+%
+% We assume nodes are ordered (numbered) as follows: Qps, Q, Fbelow, F
+% All nodes numbers should be from slice 1.
+%
+% If Fbelow if missing, this becomes a regular tabular_CPD.
+% Qps may be omitted.
+%
+% optional args [defaults]
+%
+% Qps - node numbers.
+% termprob - termprob(k,i,2) = prob finishing given Q(d)=i and Q(1:d-1)=k [ finish in last state wp 0.9]
+%
+% hhmmF_CPD is a subclass of tabular_CPD so we inherit inference methods like CPD_to_pot, etc.
+%
+% We create an isolated tabular_CPD with no F parent to learn termprob
+% so we can avail of e.g., entropic or Dirichlet priors.
+%
+% For details, see "Linear-time inference in hierarchical HMMs", Murphy and Paskin, NIPS'01.
+
+
+
+Qps = [];
+% get parents
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'Qps', Qps = varargin{i+1};
+ end
+end
+
+ns = bnet.node_sizes(:);
+Qsz = ns(Qself);
+Qpsz = prod(ns(Qps));
+CPD.Qsz = Qsz;
+CPD.Qpsz = Qpsz;
+
+ps = parents(bnet.dag, self);
+CPD.Fbelow_ndx = find_equiv_posns(Fbelow, ps);
+CPD.Qps_ndx = find_equiv_posns(Qps, ps);
+CPD.Qself_ndx = find_equiv_posns(Qself, ps);
+
+% set default arguments
+p = 0.9;
+%termprob(k,i,t) Might terminate if i=Qsz; will not terminate if i Qd(t)
+% /
+% /
+% Fd+1(t-1)
+%
+% We assume parents are ordered (numbered) as follows:
+% Qd(t-1), Fd+1(t-1), Fd(t-1), Q1(t), ..., Qd(t)
+%
+% The parents of Qd(t) can either be just Qd-1(t) or the whole stack Q1:d-1(t) (allQ)
+% In either case, we will call them Qps.
+% If d=1, Qps does not exist. Also, the F1(t-1) -> Q1(t) arc is optional.
+% If the arc is missing, startprob does not need to be specified,
+% since the toplevel is assumed to never reset (F1 does not exist).
+% If d=D, Fd+1(t-1) does not exist (there is no signal from below).
+%
+% optional args [defaults]
+%
+% transprob - transprob(i,k,j) = prob transition from i to j given Qps = k ['leftright']
+% selfprob - prob of a transition from i to i given Qps=k [0.1]
+% startprob - startprob(k,j) = prob start in j given Qps = k ['leftstart']
+% startargs - other args to be passed to the sub tabular_CPD for learning startprob
+% transargs - other args will be passed to the sub tabular_CPD for learning transprob
+% allQ - 1 means use all Q nodes above d as parents, 0 means just level d-1 [0]
+% F1toQ1 - 1 means add F1(t-1) -> Q1(t) arc, 0 means level 1 never resets [0]
+%
+% For d=1, startprob(1,j) is only needed if F1toQ1=1
+% Also, transprob(i,j) can be used instead of transprob(i,1,j).
+%
+% hhmmQ_CPD is a subclass of tabular_CPD so we inherit inference methods like CPD_to_pot, etc.
+%
+% We create isolated tabular_CPDs with no F parents to learn transprob/startprob
+% so we can avail of e.g., entropic or Dirichlet priors.
+% In the future, we will be able to represent the transprob using a tree_CPD.
+%
+% For details, see "Linear-time inference in hierarchical HMMs", Murphy and Paskin, NIPS'01.
+
+
+ss = bnet.nnodes_per_slice;
+%assert(self == Qnodes(d)+ss);
+ns = bnet.node_sizes(:);
+CPD.Qsizes = ns(Qnodes);
+CPD.d = d;
+CPD.D = D;
+allQ = 0;
+
+% find out which parents to use, to get right size
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'allQ', allQ = varargin{i+1};
+ end
+end
+
+if d==1
+ CPD.Qps = [];
+else
+ if allQ
+ CPD.Qps = Qnodes(1:d-1);
+ else
+ CPD.Qps = Qnodes(d-1);
+ end
+end
+
+Qsz = ns(self);
+Qpsz = prod(ns(CPD.Qps));
+
+% set default arguments
+startprob = 'leftstart';
+transprob = 'leftright';
+startargs = {};
+transargs = {};
+CPD.F1toQ1 = 0;
+selfprob = 0.1;
+
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'transprob', transprob = varargin{i+1};
+ case 'selfprob', selfprob = varargin{i+1};
+ case 'startprob', startprob = varargin{i+1};
+ case 'startargs', startargs = varargin{i+1};
+ case 'transargs', transargs = varargin{i+1};
+ case 'F1toQ1', CPD.F1toQ1 = varargin{i+1};
+ end
+end
+
+Qps = CPD.Qps + ss;
+old_self = self-ss;
+
+if strcmp(transprob, 'leftright')
+ LR = mk_leftright_transmat(Qsz, selfprob);
+ transprob = repmat(reshape(LR, [1 Qsz Qsz]), [Qpsz 1 1]); % transprob(k,i,j)
+ transprob = permute(transprob, [2 1 3]); % now transprob(i,k,j)
+end
+transargs{end+1} = 'CPT';
+transargs{end+1} = transprob;
+CPD.sub_CPD_trans = mk_isolated_tabular_CPD([old_self Qps], ns([old_self Qps self]), transargs);
+S = struct(CPD.sub_CPD_trans);
+CPD.transprob = myreshape(S.CPT, [Qsz Qpsz Qsz]);
+
+
+if strcmp(startprob, 'leftstart')
+ startprob = zeros(Qpsz, Qsz);
+ startprob(:,1) = 1;
+end
+
+if (d==1) & ~CPD.F1toQ1
+ CPD.sub_CPD_start = [];
+ CPD.startprob = [];
+else
+ startargs{end+1} = 'CPT';
+ startargs{end+1} = startprob;
+ CPD.sub_CPD_start = mk_isolated_tabular_CPD(Qps, ns([Qps self]), startargs);
+ S = struct(CPD.sub_CPD_start);
+ CPD.startprob = myreshape(S.CPT, [Qpsz Qsz]);
+end
+
+CPD = class(CPD, 'hhmmQ_CPD', tabular_CPD(bnet, self));
+
+CPD = update_CPT(CPD);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/log_prior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/log_prior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function L = log_prior(CPD)
+% LOG_PRIOR Return log P(theta) for a hhmm CPD
+% L = log_prior(CPD)
+
+L = log_prior(CPD.sub_CPD_trans); % the transition sub-CPD always exists, so start from its prior
+if ~isempty(CPD.sub_CPD_start) % start sub-CPD is [] when this level never resets
+ L = L + log_prior(CPD.sub_CPD_start); % priors are independent, so the log terms add
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a hhmmQ node to their ML/MAP values.
+% CPD = maximize_params(CPD, temperature)
+
+Qsz = CPD.Qsizes(CPD.d); % size of the level-d Q node
+Qpsz = prod(CPD.Qsizes(CPD.Qps)); % joint size of the Q parents (1 if there are none)
+
+if ~isempty(CPD.sub_CPD_start) % start sub-CPD is [] when this level never resets
+ CPD.sub_CPD_start = maximize_params(CPD.sub_CPD_start, temp);
+ S = struct(CPD.sub_CPD_start); % peek inside the object to read its maximized CPT
+ CPD.startprob = myreshape(S.CPT, [Qpsz Qsz]);
+ %CPD.startprob = S.CPT;
+end
+
+if 1
+ % If we are in a state that can only go the end state,
+ % we will never see a transition to another (non-end) state,
+ % so counts(i,k,j)=0 (and termprob(k,i)=1).
+ % We set counts(i,k,i)=1 in this case.
+ % This will cause remove_hhmm_end_state to return a
+ % stochastic matrix, but otherwise has no effect on EM.
+ counts = get_field(CPD.sub_CPD_trans, 'counts');
+ counts = reshape(counts, [Qsz Qpsz Qsz]); % counts(i,k,j): i=from state, k=parent context, j=to state
+ for k=1:Qpsz
+ for i=1:Qsz
+ if sum(counts(i,k,:))==0 % never witnessed a transition out of i
+ counts(i,k,i)=1; % add self loop
+ %fprintf('CPDQ d=%d i=%d k=%d\n', CPD.d, i, k);
+ end
+ end
+ end
+ CPD.sub_CPD_trans = set_fields(CPD.sub_CPD_trans, 'counts', counts(:)); % write the patched counts back
+end
+
+CPD.sub_CPD_trans = maximize_params(CPD.sub_CPD_trans, temp);
+S = struct(CPD.sub_CPD_trans); % peek inside the object to read its maximized CPT
+%CPD.transprob = S.CPT;
+CPD.transprob = myreshape(S.CPT, [Qsz Qpsz Qsz]);
+
+CPD = update_CPT(CPD); % rebuild the big CPT (with F parents) from the new sub-CPTs
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics of a hhmm Q node.
+% CPD = reset_ess(CPD)
+
+if ~isempty(CPD.sub_CPD_start) % start sub-CPD is [] when this level never resets
+ CPD.sub_CPD_start = reset_ess(CPD.sub_CPD_start); % zero the start-prob counts
+end
+CPD.sub_CPD_trans = reset_ess(CPD.sub_CPD_trans); % zero the transition counts
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,74 @@
+function CPD = update_CPT(CPD)
+% Compute the big CPT for an HHMM Q node (including F parents) given internal transprob and startprob
+% function CPD = update_CPT(CPD)
+
+Qsz = CPD.Qsz; % size of this Q node
+Qpsz = CPD.Qpsz; % joint size of the Q parents (1 if none)
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ % Fb(t-1) Fself(t-1) P(Q(t)=j| Q(t-1)=i, Qps(t)=k)
+ % ------------------------------------------------------
+ % 1 1 delta(i,j)
+ % 2 1 transprob(i,k,j)
+ % 1 2 impossible
+ % 2 2 startprob(k,j)
+ CPT = zeros(Qsz, 2, 2, Qpsz, Qsz);
+ I = repmat(eye(Qsz), [1 1 Qpsz]); % i,j,k
+ I = permute(I, [1 3 2]); % i,k,j
+ CPT(:, 1, 1, :, :) = I;
+ CPT(:, 2, 1, :, :) = CPD.transprob;
+ CPT(:, 1, 2, :, :) = I; % "impossible" combination: filled with delta so each row still normalizes
+ CPT(:, 2, 2, :, :) = repmat(reshape(CPD.startprob, [1 Qpsz Qsz]), [Qsz 1 1]); % replicate over i
+ else % no F from self, hence no startprob
+ % Fb(t-1) P(Q(t)=j| Q(t-1)=i, Qps(t)=k)
+ % ------------------------------------------------------
+ % 1 delta(i,j)
+ % 2 transprob(i,k,j)
+
+ nps = length(CPD.dom_sz)-1; % num parents
+ CPT = 0*myones(CPD.dom_sz);
+ %CPT = zeros(Qsz, 2, Qpsz, Qsz); % assumes CPT(Q(t-1), F(t-1), Qps, Q(t))
+ % but a member of Qps may preceed Q(t-1) or F(t-1) in the ordering
+
+ I = repmat(eye(Qsz), [1 1 Qpsz]); % i,j,k
+ I = permute(I, [1 3 2]); % i,k,j
+
+ % the following fails if there is a member of Qps with a lower
+ % number than F
+ %CPT(:, 1, :, :) = I;
+ %CPT(:, 2, :, :) = CPD.transprob;
+
+ ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 1);
+ CPT(ndx{:}) = I;
+ ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 2);
+ CPT(ndx{:}) = CPD.transprob;
+ %keyboard % BUGFIX: leftover debug breakpoint removed; it dropped every caller into the debugger
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx)
+ % Q(t-1), Fself(t-1), Qps, Q(t)
+
+ % if condition start on previous concrete state (as in map learning),
+ % CPT(:, 1, :, :, :) = CPD.transprob(Q(t-1), Qps, Q(t))
+ % CPT(:, 2, :, :, :) = CPD.startprob(Q(t-1), Qps, Q(t))
+
+ % Fself(t-1) P(Q(t-1)=i, Qps(t)=k -> Q(t)=j)
+ % ------------------------------------------------------
+ % 1 transprob(i,k,j)
+ % 2 startprob(k,j)
+ CPT = zeros(Qsz, 2, Qpsz, Qsz);
+ I = repmat(eye(Qsz), [1 1 Qpsz]); % i,j,k (unused in this branch)
+ I = permute(I, [1 3 2]); % i,k,j
+ CPT(:, 1, :, :) = CPD.transprob;
+ if CPD.fullstartprob
+ CPT(:, 2, :, :) = CPD.startprob; % startprob already conditioned on Q(t-1)
+ else
+ CPT(:, 2, :, :) = repmat(reshape(CPD.startprob, [1 Qpsz Qsz]), [Qsz 1 1]); % replicate over i
+ end
+ else % no F from self
+ error('An hhmmQ node without any F parents is just a tabular_CPD')
+ end
+end
+
+CPD = set_fields(CPD, 'CPT', CPT);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,141 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a hhmm Q node.
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+
+% Figure out the node numbers associated with each parent
+% e.g., D=4, d=3, Qps = all Qs above, so dom = [Q3(t-1) F4(t-1) F3(t-1) Q1(t) Q2(t) Q3(t)].
+% so self = Q3(t), old_self = Q3(t-1), CPD.Qps = [1 2], Qps = [Q1(t) Q2(t)]
+dom = fmarginal.domain;
+self = dom(end);
+old_self = dom(1);
+Qps = dom(length(dom)-length(CPD.Qps):end-1);
+
+Qsz = CPD.Qsizes(CPD.d); % size of the level-d Q node
+Qpsz = prod(CPD.Qsizes(CPD.Qps)); % joint size of the Q parents (1 if none)
+
+% If some of the Q nodes are observed (which happens during supervised training)
+% the counts will only be non-zero in positions
+% consistent with the evidence. We put the computed marginal responsibilities
+% into the appropriate slots of the big counts array.
+% (Recall that observed discrete nodes only have a single effective value.)
+% (A more general, but much slower, way is to call add_evidence_to_dmarginal.)
+% We assume the F nodes are never observed.
+
+obs_self = ~hidden_bitv(self);
+obs_Qps = (~isempty(Qps)) & (~any(hidden_bitv(Qps))); % we assume that all or none of the Q parents are observed
+
+if obs_self
+ self_val = evidence{self};
+ oldself_val = evidence{old_self};
+end
+
+if obs_Qps
+ Qps_val = subv2ind(Qpsz, cat(1, evidence{Qps}));
+ if Qps_val == 0
+ error('hhmmQ update_ess: Qps evidence mapped to index 0') % BUGFIX: was "keyboard", which hung unattended EM runs at the debugger
+ end
+end
+
+if CPD.d==1 % no Qps from above
+ if ~CPD.F1toQ1 % no F from self
+ % marg(Q1(t-1), F2(t-1), Q1(t))
+ % F2(t-1) P(Q1(t)=j | Q1(t-1)=i)
+ % 1 delta(i,j)
+ % 2 transprob(i,j)
+ if obs_self
+ hor_counts = zeros(Qsz, Qsz);
+ hor_counts(oldself_val, self_val) = fmarginal.T(2);
+ else
+ marg = reshape(fmarginal.T, [Qsz 2 Qsz]);
+ hor_counts = squeeze(marg(:,2,:));
+ end
+ else
+ % marg(Q1(t-1), F2(t-1), F1(t-1), Q1(t))
+ % F2(t-1) F1(t-1) P(Qd(t)=j| Qd(t-1)=i)
+ % ------------------------------------------------------
+ % 1 1 delta(i,j)
+ % 2 1 transprob(i,j)
+ % 1 2 impossible
+ % 2 2 startprob(j)
+ if obs_self
+ marg = myreshape(fmarginal.T, [1 2 2 1]);
+ hor_counts = zeros(Qsz, Qsz);
+ hor_counts(oldself_val, self_val) = marg(1,2,1,1);
+ ver_counts = zeros(Qsz, 1);
+ %ver_counts(self_val) = marg(1,2,2,1);
+ ver_counts(self_val) = marg(1,2,2,1) + marg(1,1,2,1);
+ else
+ marg = reshape(fmarginal.T, [Qsz 2 2 Qsz]);
+ hor_counts = squeeze(marg(:,2,1,:));
+ %ver_counts = squeeze(sum(marg(:,2,2,:),1)); % sum over i
+ ver_counts = squeeze(sum(marg(:,2,2,:),1)) + squeeze(sum(marg(:,1,2,:),1)); % sum i,b
+ end
+ end % F1toQ1
+else % d ~= 1
+ if CPD.d < CPD.D % general case
+ % marg(Qd(t-1), Fd+1(t-1), Fd(t-1), Qps(t), Qd(t))
+ % Fd+1(t-1) Fd(t-1) P(Qd(t)=j| Qd(t-1)=i, Qps(t)=k)
+ % ------------------------------------------------------
+ % 1 1 delta(i,j)
+ % 2 1 transprob(i,k,j)
+ % 1 2 impossible
+ % 2 2 startprob(k,j)
+ if obs_Qps & obs_self
+ marg = myreshape(fmarginal.T, [1 2 2 1 1]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(oldself_val, Qps_val, self_val) = marg(1, 2,1, k,1);
+ ver_counts = zeros(Qpsz, Qsz);
+ %ver_counts(Qps_val, self_val) = marg(1, 2,2, k,1);
+ ver_counts(Qps_val, self_val) = marg(1, 2,2, k,1) + marg(1, 1,2, k,1);
+ elseif obs_Qps & ~obs_self
+ marg = myreshape(fmarginal.T, [Qsz 2 2 1 Qsz]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(:, Qps_val, :) = marg(:, 2,1, k,:);
+ ver_counts = zeros(Qpsz, Qsz);
+ %ver_counts(Qps_val, :) = sum(marg(:, 2,2, k,:), 1);
+ ver_counts(Qps_val, :) = sum(marg(:, 2,2, k,:), 1) + sum(marg(:, 1,2, k,:), 1);
+ elseif ~obs_Qps & obs_self
+ error('not yet implemented')
+ else % everything is hidden
+ marg = reshape(fmarginal.T, [Qsz 2 2 Qpsz Qsz]);
+ hor_counts = squeeze(marg(:,2,1,:,:)); % i,k,j
+ %ver_counts = squeeze(sum(marg(:,2,2,:,:),1)); % sum over i
+ ver_counts = squeeze(sum(marg(:,2,2,:,:),1)) + squeeze(sum(marg(:,1,2,:,:),1)); % sum over i,b
+ end
+ else % d == D, so no F from below
+ % marg(QD(t-1), FD(t-1), Qps(t), QD(t))
+ % FD(t-1) P(QD(t)=j | QD(t-1)=i, Qps(t)=k)
+ % 1 transprob(i,k,j)
+ % 2 startprob(k,j)
+ if obs_Qps & obs_self
+ marg = myreshape(fmarginal.T, [1 2 1 1]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(oldself_val, Qps_val, self_val) = marg(1, 1, k,1);
+ ver_counts = zeros(Qpsz, Qsz);
+ ver_counts(Qps_val, self_val) = marg(1, 2, k,1);
+ elseif obs_Qps & ~obs_self
+ marg = myreshape(fmarginal.T, [Qsz 2 1 Qsz]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(:, Qps_val, :) = marg(:, 1, k,:);
+ ver_counts = zeros(Qpsz, Qsz);
+ ver_counts(Qps_val, :) = sum(marg(:, 2, k, :), 1);
+ elseif ~obs_Qps & obs_self
+ error('not yet implemented')
+ else % everything is hidden
+ marg = reshape(fmarginal.T, [Qsz 2 Qpsz Qsz]);
+ hor_counts = squeeze(marg(:,1,:,:));
+ ver_counts = squeeze(sum(marg(:,2,:,:),1)); % sum over i
+ end
+ end
+end
+
+CPD.sub_CPD_trans = update_ess_simple(CPD.sub_CPD_trans, hor_counts); % accumulate transition counts
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = update_ess_simple(CPD.sub_CPD_start, ver_counts); % accumulate start (reset) counts
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,178 @@
+function CPD = update_ess2(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a hhmm Q node.
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+
+% Figure out the node numbers associated with each parent
+dom = fmarginal.domain;
+self = dom(end); % by assumption
+old_self = dom(CPD.old_self_ndx);
+Fself = dom(CPD.Fself_ndx);
+Fbelow = dom(CPD.Fbelow_ndx);
+Qps = dom(CPD.Qps_ndx);
+
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+
+fmarg = add_ev_to_dmarginal(fmarginal, evidence, ns); % NOTE(review): result is never used below - looks like abandoned work in progress
+
+
+
+% hor_counts(old_self, Qps, self),
+% fmarginal(old_self, Fbelow, Fself, Qps, self)
+% hor_counts(i,k,j) = fmarginal(i,2,1,k,j) % below has finished, self has not
+% ver_counts(i,k,j) = fmarginal(i,2,2,k,j) % below has finished, and so has self (reset)
+% Since any of i,j,k may be observed, we write
+% hor_counts(counts_ndx{:}) = fmarginal(fmarg_ndx{:})
+% where e.g., counts_ndx = {1, ':', 2} if Qps is hidden but we observe old_self=1, self=2.
+% To create this counts_ndx, we write counts_ndx = mk_multi_ndx(3, obs_dim, obs_val)
+% where counts_obs_dim = [1 3], counts_obs_val = [1 2] specifies the values of dimensions 1 and 3.
+
+counts_obs_dim = []; % NOTE(review): these index-building variables are never consumed below
+fmarg_obs_dim = [];
+obs_val = [];
+if hidden_bitv(self)
+ effQsz = Qsz;
+else
+ effQsz = 1;
+ counts_obs_dim = [counts_obs_dim 3];
+ fmarg_obs_dim = [fmarg_obs_dim 5];
+ obs_val = [obs_val evidence{self}];
+end
+
+% e.g., D=4, d=3, Qps = all Qs above, so dom = [Q3(t-1) F4(t-1) F3(t-1) Q1(t) Q2(t) Q3(t)].
+% so self = Q3(t), old_self = Q3(t-1), CPD.Qps = [1 2], Qps = [Q1(t) Q2(t)]
+dom = fmarginal.domain;
+self = dom(end);
+old_self = dom(1);
+Qps = dom(length(dom)-length(CPD.Qps):end-1);
+
+Qsz = CPD.Qsizes(CPD.d);
+Qpsz = prod(CPD.Qsizes(CPD.Qps));
+
+% If some of the Q nodes are observed (which happens during supervised training)
+% the counts will only be non-zero in positions
+% consistent with the evidence. We put the computed marginal responsibilities
+% into the appropriate slots of the big counts array.
+% (Recall that observed discrete nodes only have a single effective value.)
+% (A more general, but much slower, way is to call add_evidence_to_dmarginal.)
+% We assume the F nodes are never observed.
+
+obs_self = ~hidden_bitv(self);
+obs_Qps = (~isempty(Qps)) & (~any(hidden_bitv(Qps))); % we assume that all or none of the Q parents are observed
+
+if obs_self
+ self_val = evidence{self};
+ oldself_val = evidence{old_self};
+end
+
+if obs_Qps
+ Qps_val = subv2ind(Qpsz, cat(1, evidence{Qps}));
+ if Qps_val == 0
+ error('hhmmQ update_ess2: Qps evidence mapped to index 0') % BUGFIX: was "keyboard", which hung unattended EM runs at the debugger
+ end
+end
+
+if CPD.d==1 % no Qps from above
+ if ~CPD.F1toQ1 % no F from self
+ % marg(Q1(t-1), F2(t-1), Q1(t))
+ % F2(t-1) P(Q1(t)=j | Q1(t-1)=i)
+ % 1 delta(i,j)
+ % 2 transprob(i,j)
+ if obs_self
+ hor_counts = zeros(Qsz, Qsz);
+ hor_counts(oldself_val, self_val) = fmarginal.T(2);
+ else
+ marg = reshape(fmarginal.T, [Qsz 2 Qsz]);
+ hor_counts = squeeze(marg(:,2,:));
+ end
+ else
+ % marg(Q1(t-1), F2(t-1), F1(t-1), Q1(t))
+ % F2(t-1) F1(t-1) P(Qd(t)=j| Qd(t-1)=i)
+ % ------------------------------------------------------
+ % 1 1 delta(i,j)
+ % 2 1 transprob(i,j)
+ % 1 2 impossible
+ % 2 2 startprob(j)
+ if obs_self
+ marg = myreshape(fmarginal.T, [1 2 2 1]);
+ hor_counts = zeros(Qsz, Qsz);
+ hor_counts(oldself_val, self_val) = marg(1,2,1,1);
+ ver_counts = zeros(Qsz, 1);
+ %ver_counts(self_val) = marg(1,2,2,1);
+ ver_counts(self_val) = marg(1,2,2,1) + marg(1,1,2,1);
+ else
+ marg = reshape(fmarginal.T, [Qsz 2 2 Qsz]);
+ hor_counts = squeeze(marg(:,2,1,:));
+ %ver_counts = squeeze(sum(marg(:,2,2,:),1)); % sum over i
+ ver_counts = squeeze(sum(marg(:,2,2,:),1)) + squeeze(sum(marg(:,1,2,:),1)); % sum i,b
+ end
+ end % F1toQ1
+else % d ~= 1
+ if CPD.d < CPD.D % general case
+ % marg(Qd(t-1), Fd+1(t-1), Fd(t-1), Qps(t), Qd(t))
+ % Fd+1(t-1) Fd(t-1) P(Qd(t)=j| Qd(t-1)=i, Qps(t)=k)
+ % ------------------------------------------------------
+ % 1 1 delta(i,j)
+ % 2 1 transprob(i,k,j)
+ % 1 2 impossible
+ % 2 2 startprob(k,j)
+ if obs_Qps & obs_self
+ marg = myreshape(fmarginal.T, [1 2 2 1 1]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(oldself_val, Qps_val, self_val) = marg(1, 2,1, k,1);
+ ver_counts = zeros(Qpsz, Qsz);
+ %ver_counts(Qps_val, self_val) = marg(1, 2,2, k,1);
+ ver_counts(Qps_val, self_val) = marg(1, 2,2, k,1) + marg(1, 1,2, k,1);
+ elseif obs_Qps & ~obs_self
+ marg = myreshape(fmarginal.T, [Qsz 2 2 1 Qsz]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(:, Qps_val, :) = marg(:, 2,1, k,:);
+ ver_counts = zeros(Qpsz, Qsz);
+ %ver_counts(Qps_val, :) = sum(marg(:, 2,2, k,:), 1);
+ ver_counts(Qps_val, :) = sum(marg(:, 2,2, k,:), 1) + sum(marg(:, 1,2, k,:), 1);
+ elseif ~obs_Qps & obs_self
+ error('not yet implemented')
+ else % everything is hidden
+ marg = reshape(fmarginal.T, [Qsz 2 2 Qpsz Qsz]);
+ hor_counts = squeeze(marg(:,2,1,:,:)); % i,k,j
+ %ver_counts = squeeze(sum(marg(:,2,2,:,:),1)); % sum over i
+ ver_counts = squeeze(sum(marg(:,2,2,:,:),1)) + squeeze(sum(marg(:,1,2,:,:),1)); % sum over i,b
+ end
+ else % d == D, so no F from below
+ % marg(QD(t-1), FD(t-1), Qps(t), QD(t))
+ % FD(t-1) P(QD(t)=j | QD(t-1)=i, Qps(t)=k)
+ % 1 transprob(i,k,j)
+ % 2 startprob(k,j)
+ if obs_Qps & obs_self
+ marg = myreshape(fmarginal.T, [1 2 1 1]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(oldself_val, Qps_val, self_val) = marg(1, 1, k,1);
+ ver_counts = zeros(Qpsz, Qsz);
+ ver_counts(Qps_val, self_val) = marg(1, 2, k,1);
+ elseif obs_Qps & ~obs_self
+ marg = myreshape(fmarginal.T, [Qsz 2 1 Qsz]);
+ k = 1;
+ hor_counts = zeros(Qsz, Qpsz, Qsz);
+ hor_counts(:, Qps_val, :) = marg(:, 1, k,:);
+ ver_counts = zeros(Qpsz, Qsz);
+ ver_counts(Qps_val, :) = sum(marg(:, 2, k, :), 1);
+ elseif ~obs_Qps & obs_self
+ error('not yet implemented')
+ else % everything is hidden
+ marg = reshape(fmarginal.T, [Qsz 2 Qpsz Qsz]);
+ hor_counts = squeeze(marg(:,1,:,:));
+ ver_counts = squeeze(sum(marg(:,2,:,:),1)); % sum over i
+ end
+ end
+end
+
+CPD.sub_CPD_trans = update_ess_simple(CPD.sub_CPD_trans, hor_counts); % accumulate transition counts
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = update_ess_simple(CPD.sub_CPD_start, ver_counts); % accumulate start (reset) counts
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a hhmm Q node.
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+%
+% we assume if one of the Qps is observed, all of them are
+% We assume the F nodes are already hidden
+
+% Figure out the node numbers associated with each parent
+dom = fmarginal.domain;
+self = dom(CPD.self_ndx);
+old_self = dom(CPD.old_self_ndx);
+%Fself = dom(CPD.Fself_ndx);
+%Fbelow = dom(CPD.Fbelow_ndx);
+Qps = dom(CPD.Qps_ndx);
+
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+
+% hor_counts(old_self, Qps, self),
+% fmarginal(old_self, Fbelow, Fself, Qps, self)
+% hor_counts(i,k,j) = fmarginal(i,2,1,k,j) % below has finished, self has not
+% ver_counts(i,k,j) = fmarginal(i,2,2,k,j) % below has finished, and so has self (reset)
+% Since any of i,j,k may be observed, we write
+% hor_counts(ndx{:}) = fmarginal(...)
+% where e.g., ndx = {1, ':', 2} if Qps is hidden but we observe old_self=1, self=2.
+
+% ndx{i,k,j}
+if hidden_bitv(old_self)
+ ndx{1} = ':';
+else
+ ndx{1} = evidence{old_self};
+end
+if hidden_bitv(Qps)
+ ndx{2} = ':';
+else
+ ndx{2} = subv2ind(Qpsz, cat(1, evidence{Qps}));
+end
+if hidden_bitv(self)
+ ndx{3} = ':';
+else
+ ndx{3} = evidence{self};
+end
+
+fmarg = add_ev_to_dmarginal(fmarginal, evidence, ns);
+% marg(Qold(t-1), Fbelow(t-1), Fself(t-1), Qps(t), Qself(t))
+hor_counts = zeros(Qsz, Qpsz, Qsz);
+ver_counts = zeros(Qpsz, Qsz);
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ fmarg.T = myreshape(fmarg.T, [Qsz 2 2 Qpsz Qsz]);
+ marg_ndx = {ndx{1}, 2, 1, ndx{2}, ndx{3}};
+ hor_counts(ndx{:}) = fmarg.T(marg_ndx{:});
+ ver_counts(ndx{2:3}) = ... % sum over Fbelow and Qold=i
+ sum(fmarg.T(ndx{1}, 1, 2, ndx{2}, ndx{3}),1) + ... % BUGFIX: was ".." (syntax error) and cell-array subscripting "T({...})"
+ sum(fmarg.T(ndx{1}, 2, 2, ndx{2}, ndx{3}),1);
+ else % no F from self, hence no startprob
+ fmarg.T = myreshape(fmarg.T, [Qsz 2 Qpsz Qsz]);
+ hor_counts(ndx{:}) = fmarg.T(ndx{1}, 2, ndx{2}, ndx{3}); % BUGFIX: comma-list indexing, not a cell argument
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx) % self F
+ fmarg.T = myreshape(fmarg.T, [Qsz 2 Qpsz Qsz]);
+ hor_counts(ndx{:}) = fmarg.T(ndx{1}, 1, ndx{2}, ndx{3}); % BUGFIX: comma-list indexing, not a cell argument
+ ver_counts(ndx{2:3}) = ... % sum over Qold=i
+ sum(fmarg.T(ndx{1}, 2, ndx{2}, ndx{3}),1); % BUGFIX: comma-list indexing, not a cell argument
+ else % no F from self
+ error('An hhmmQ node without any F parents is just a tabular_CPD')
+ end
+end
+
+
+CPD.sub_CPD_trans = update_ess_simple(CPD.sub_CPD_trans, hor_counts); % accumulate transition counts
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = update_ess_simple(CPD.sub_CPD_start, ver_counts); % accumulate start (reset) counts
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess4.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/Old/update_ess4.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,95 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a hhmm Q node.
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+%
+% we assume if one of the Qps is observed, all of them are
+% We assume the F nodes are already hidden
+
+% Figure out the node numbers associated with each parent
+dom = fmarginal.domain;
+self = dom(CPD.self_ndx);
+old_self = dom(CPD.old_self_ndx);
+%Fself = dom(CPD.Fself_ndx);
+%Fbelow = dom(CPD.Fbelow_ndx);
+Qps = dom(CPD.Qps_ndx);
+
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+
+% hor_counts(old_self, Qps, self),
+% fmarginal(old_self, Fbelow, Fself, Qps, self)
+% hor_counts(i,k,j) = fmarginal(i,2,1,k,j) % below has finished, self has not
+% ver_counts(i,k,j) = fmarginal(i,2,2,k,j) % below has finished, and so has self (reset)
+% Since any of i,j,k may be observed, we write
+% hor_counts(i_counts_ndx, kndx, jndx) = fmarginal(i_fmarg_ndx...)
+% where i_fmarg_ndx = 1 and i_counts_ndx = i if old_self is observed to have value i,
+% i_fmarg_ndx = 1:Qsz and i_counts_ndx = 1:Qsz if old_self is hidden, etc.
+
+
+if hidden_bitv(old_self)
+ i_counts_ndx = 1:Qsz;
+ i_fmarg_ndx = 1:Qsz;
+ eff_oldQsz = Qsz;
+else
+ i_counts_ndx = evidence{old_self};
+ i_fmarg_ndx = 1;
+ eff_oldQsz = 1;
+end
+
+if all(hidden_bitv(Qps)) % we assume all are hidden or all are observed
+ k_counts_ndx = 1:Qpsz;
+ k_fmarg_ndx = 1:Qpsz;
+ eff_Qpsz = Qpsz;
+else
+ k_counts_ndx = subv2ind(Qpsz, cat(1, evidence{Qps}));
+ k_fmarg_ndx = 1;
+ eff_Qpsz = 1;
+end
+
+if hidden_bitv(self)
+ j_counts_ndx = 1:Qsz;
+ j_fmarg_ndx = 1:Qsz;
+ eff_Qsz = Qsz;
+else
+ j_counts_ndx = evidence{self};
+ j_fmarg_ndx = 1;
+ eff_Qsz = 1;
+end
+
+hor_counts = zeros(Qsz, Qpsz, Qsz);
+ver_counts = zeros(Qpsz, Qsz);
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ fmarg.T = myreshape(fmarginal.T, [eff_oldQsz 2 2 eff_Qpsz eff_Qsz]); % BUGFIX: was fmarg.T, which is undefined here
+ hor_counts(i_counts_ndx, k_counts_ndx, j_counts_ndx) = ...
+ fmarg.T(i_fmarg_ndx, 2, 1, k_fmarg_ndx, j_fmarg_ndx); % BUGFIX: dropped stray leading ':' (6 subscripts into a 5-D array)
+ ver_counts(k_counts_ndx, j_counts_ndx) = ... % sum over Fbelow and Qold
+ sum(fmarg.T(:, 1, 2, k_fmarg_ndx, j_fmarg_ndx), 1) + ...
+ sum(fmarg.T(:, 2, 2, k_fmarg_ndx, j_fmarg_ndx), 1);
+ else % no F from self, hence no startprob
+ fmarg.T = myreshape(fmarginal.T, [eff_oldQsz 2 eff_Qpsz eff_Qsz]); % BUGFIX: was fmarg.T, which is undefined here
+ hor_counts(i_counts_ndx, k_counts_ndx, j_counts_ndx) = ...
+ fmarg.T(i_fmarg_ndx, 2, k_fmarg_ndx, j_fmarg_ndx);
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx) % self F
+ fmarg.T = myreshape(fmarginal.T, [eff_oldQsz 2 eff_Qpsz eff_Qsz]); % BUGFIX: was fmarg.T, which is undefined here
+ hor_counts(i_counts_ndx, k_counts_ndx, j_counts_ndx) = ...
+ fmarg.T(i_fmarg_ndx, 1, k_fmarg_ndx, j_fmarg_ndx);
+ ver_counts(k_counts_ndx, j_counts_ndx) = ... % sum over Qold
+ sum(fmarg.T(:, 2, k_fmarg_ndx, j_fmarg_ndx), 1);
+ else % no F from self
+ error('An hhmmQ node without any F parents is just a tabular_CPD')
+ end
+end
+
+
+CPD.sub_CPD_trans = update_ess_simple(CPD.sub_CPD_trans, hor_counts); % accumulate transition counts
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = update_ess_simple(CPD.sub_CPD_start, ver_counts); % accumulate start (reset) counts
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/hhmmQ_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/hhmmQ_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,132 @@
+function CPD = hhmmQ_CPD(bnet, self, varargin)
+% HHMMQ_CPD Make the CPD for a Q node in a hierarchical HMM
+% CPD = hhmmQ_CPD(bnet, self, ...)
+%
+% Fself(t-1) Qps(t)
+% \ |
+% \ v
+% Qold(t-1) -> Q(t)
+% /
+% /
+% Fbelow(t-1)
+%
+% Let ss = slice size = num. nodes per slice.
+% This node is Q(t), and has mandatory parents Qold(t-1) (assumed to be numbered Q(t)-ss)
+% and optional parents Fbelow, Fself, Qps.
+% We require parents to be ordered (numbered) as follows:
+% Qold, Fbelow, Fself, Qps, Q.
+%
+% If Fself=2, we use the transition matrix, else we use the prior matrix.
+% If Fself node is omitted (eg. top level), we always use the transition matrix.
+% If Fbelow=2, we may change state, otherwise we must stay in the same state.
+% If Fbelow node is omitted (eg., bottom level), we may change state at every step.
+% If Qps (Q parents) are specified, all parameters are conditioned on their joint value.
+% We may choose any subset of nodes to condition on, as long as they as numbered lower than self.
+%
+% optional args [defaults]
+%
+% Fself - node number <= ss
+% Fbelow - node number <= ss
+% Qps - node numbers (all <= 2*ss) - uses 2TBN indexing
+% transprob - transprob(i,k,j) = prob transition from i to j given Qps = k ['leftright']
+% selfprob - prob of a transition from i to i given Qps=k [0.1]
+% startprob - startprob(k,j) = prob start in j given Qps = k ['leftstart']
+% startargs - other args to be passed to the sub tabular_CPD for learning startprob
+% transargs - other args will be passed to the sub tabular_CPD for learning transprob
+% fullstartprob - 1 means startprob depends on Q(t-1) [0]
+% hhmmQ_CPD is a subclass of tabular_CPD so we inherit inference methods like CPD_to_pot, etc.
+%
+% We create isolated tabular_CPDs with no F parents to learn transprob/startprob
+% so we can avail of e.g., entropic or Dirichlet priors.
+% In the future, we will be able to represent the transprob using a tree_CPD.
+%
+% For details, see "Linear-time inference in hierarchical HMMs", Murphy and Paskin, NIPS'01.
+
+
+ss = bnet.nnodes_per_slice; % slice size: slice-1 copy of this node is self-ss
+ns = bnet.node_sizes(:);
+
+% set default arguments
+Fself = [];
+Fbelow = [];
+Qps = [];
+startprob = 'leftstart';
+transprob = 'leftright';
+startargs = {};
+transargs = {};
+selfprob = 0.1;
+fullstartprob = 0;
+
+for i=1:2:length(varargin) % parse name/value pairs; unknown names are silently ignored
+ switch varargin{i},
+ case 'Fself', Fself = varargin{i+1};
+ case 'Fbelow', Fbelow = varargin{i+1};
+ case 'Qps', Qps = varargin{i+1};
+ case 'transprob', transprob = varargin{i+1};
+ case 'selfprob', selfprob = varargin{i+1};
+ case 'startprob', startprob = varargin{i+1};
+ case 'startargs', startargs = varargin{i+1};
+ case 'transargs', transargs = varargin{i+1};
+ case 'fullstartprob', fullstartprob = varargin{i+1};
+ end
+end
+
+CPD.fullstartprob = fullstartprob; % if 1, startprob is also conditioned on Q(t-1)
+
+ps = parents(bnet.dag, self);
+ndsz = ns(:)';
+CPD.dom_sz = [ndsz(ps) ns(self)]; % sizes of the family domain [parents self]
+CPD.Fself_ndx = find_equiv_posns(Fself, ps); % positions of each optional parent within ps ([] if absent)
+CPD.Fbelow_ndx = find_equiv_posns(Fbelow, ps);
+%CPD.Qps_ndx = find_equiv_posns(Qps+ss, ps);
+CPD.Qps_ndx = find_equiv_posns(Qps, ps);
+old_self = self-ss; % Q(t-1), by the numbering convention in the header
+CPD.old_self_ndx = find_equiv_posns(old_self, ps);
+
+Qps = ps(CPD.Qps_ndx);
+CPD.Qsz = ns(self);
+CPD.Qpsz = prod(ns(Qps)); % joint size of the Q parents (1 if none)
+CPD.Qpsizes = ns(Qps);
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+if strcmp(transprob, 'leftright')
+ LR = mk_leftright_transmat(Qsz, selfprob);
+ transprob = repmat(reshape(LR, [1 Qsz Qsz]), [Qpsz 1 1]); % transprob(k,i,j)
+ transprob = permute(transprob, [2 1 3]); % now transprob(i,k,j)
+end
+transargs{end+1} = 'CPT';
+transargs{end+1} = transprob;
+CPD.sub_CPD_trans = mk_isolated_tabular_CPD(ns([old_self Qps self]), transargs); % NOTE(review): passes family sizes only; the Old/ constructor also passed node ids - confirm signature
+S = struct(CPD.sub_CPD_trans); % peek inside the tabular_CPD object to read its CPT
+%CPD.transprob = myreshape(S.CPT, [Qsz Qpsz Qsz]);
+CPD.transprob = S.CPT;
+
+
+if strcmp(startprob, 'leftstart') % default: always (re)start in state 1
+ startprob = zeros(Qpsz, Qsz);
+ startprob(:,1) = 1;
+end
+if isempty(CPD.Fself_ndx) % no Fself parent means this level never resets, so no startprob needed
+ CPD.sub_CPD_start = [];
+ CPD.startprob = [];
+else
+ startargs{end+1} = 'CPT';
+ startargs{end+1} = startprob;
+ if CPD.fullstartprob
+ CPD.sub_CPD_start = mk_isolated_tabular_CPD(ns([self Qps self]), startargs); % start also conditioned on Q(t-1)
+ S = struct(CPD.sub_CPD_start);
+ %CPD.startprob = myreshape(S.CPT, [Qsz Qpsz Qsz]);
+ CPD.startprob = S.CPT;
+ else
+ CPD.sub_CPD_start = mk_isolated_tabular_CPD(ns([Qps self]), startargs);
+ S = struct(CPD.sub_CPD_start);
+ %CPD.startprob = myreshape(S.CPT, [CPD.Qpsizes Qsz]);
+ CPD.startprob = S.CPT;
+ end
+end
+
+CPD = class(CPD, 'hhmmQ_CPD', tabular_CPD(bnet, self)); % inherit inference machinery from tabular_CPD
+
+CPD = update_CPT(CPD); % build the big CPT (with F parents) from transprob/startprob
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/log_prior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/log_prior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function L = log_prior(CPD)
+% LOG_PRIOR Return log P(theta) for a hhmm CPD
+% L = log_prior(CPD)
+
+L = log_prior(CPD.sub_CPD_trans);
+if ~isempty(CPD.sub_CPD_start)
+ L = L + log_prior(CPD.sub_CPD_start);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a hhmmQ node to their ML/MAP values.
+% CPD = maximize_params(CPD, temperature)
+
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = maximize_params(CPD.sub_CPD_start, temp);
+ S = struct(CPD.sub_CPD_start);
+ CPD.startprob = myreshape(S.CPT, [Qpsz Qsz]);
+ %CPD.startprob = S.CPT;
+end
+
+if 1
+ % If we are in a state that can only go the end state,
+ % we will never see a transition to another (non-end) state,
+ % so counts(i,k,j)=0 (and termprob(k,i)=1).
+ % We set counts(i,k,i)=1 in this case.
+ % This will cause remove_hhmm_end_state to return a
+ % stochastic matrix, but otherwise has no effect on EM.
+ counts = get_field(CPD.sub_CPD_trans, 'counts');
+ counts = reshape(counts, [Qsz Qpsz Qsz]);
+ for k=1:Qpsz
+ for i=1:Qsz
+ if sum(counts(i,k,:))==0 % never witnessed a transition out of i
+ counts(i,k,i)=1; % add self loop
+ %fprintf('CPDQ d=%d i=%d k=%d\n', CPD.d, i, k);
+ end
+ end
+ end
+ CPD.sub_CPD_trans = set_fields(CPD.sub_CPD_trans, 'counts', counts(:));
+end
+
+CPD.sub_CPD_trans = maximize_params(CPD.sub_CPD_trans, temp);
+S = struct(CPD.sub_CPD_trans);
+%CPD.transprob = S.CPT;
+CPD.transprob = myreshape(S.CPT, [Qsz Qpsz Qsz]);
+
+CPD = update_CPT(CPD);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics of a hhmm Q node.
+% CPD = reset_ess(CPD)
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = reset_ess(CPD.sub_CPD_start);
+end
+CPD.sub_CPD_trans = reset_ess(CPD.sub_CPD_trans);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/update_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/update_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+function CPD = update_CPT(CPD)
+% Compute the big CPT for an HHMM Q node (including F parents) given internal transprob and startprob
+% function CPD = update_CPT(CPD)
+
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ % Fb(t-1) Fself(t-1) P(Q(t)=j| Q(t-1)=i, Qps(t)=k)
+ % ------------------------------------------------------
+ % 1 1 delta(i,j)
+ % 2 1 transprob(i,k,j)
+ % 1 2 impossible
+ % 2 2 startprob(k,j)
+ CPT = zeros(Qsz, 2, 2, Qpsz, Qsz);
+ I = repmat(eye(Qsz), [1 1 Qpsz]); % i,j,k
+ I = permute(I, [1 3 2]); % i,k,j
+ CPT(:, 1, 1, :, :) = I;
+ CPT(:, 2, 1, :, :) = CPD.transprob;
+ CPT(:, 1, 2, :, :) = I;
+ CPT(:, 2, 2, :, :) = repmat(reshape(CPD.startprob, [1 Qpsz Qsz]), ...
+ [Qsz 1 1]); % replicate over i
+ else % no F from self, hence no startprob
+ % Fb(t-1) P(Q(t)=j| Q(t-1)=i, Qps(t)=k)
+ % ------------------------------------------------------
+ % 1 delta(i,j)
+ % 2 transprob(i,k,j)
+
+ nps = length(CPD.dom_sz)-1; % num parents
+ CPT = 0*myones(CPD.dom_sz);
+ %CPT = zeros(Qsz, 2, Qpsz, Qsz); % assumes CPT(Q(t-1), F(t-1), Qps, Q(t))
+ % but a member of Qps may preceed Q(t-1) or F(t-1) in the ordering
+
+ for k=1:CPD.Qpsz
+ Qps_vals = ind2subv(CPD.Qpsizes, k);
+ ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Qps_ndx], [1 Qps_vals]);
+ CPT(ndx{:}) = eye(Qsz); % CPT(:,2,k,:) or CPT(:,k,2,:) etc
+ end
+ ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 2);
+ CPT(ndx{:}) = CPD.transprob; % we assume transprob is in topo order
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx)
+ % Q(t-1), Fself(t-1), Qps, Q(t)
+
+ % Fself(t-1) P(Q(t-1)=i, Qps(t)=k -> Q(t)=j)
+ % ------------------------------------------------------
+ % 1 transprob(i,k,j)
+ % 2 startprob(k,j)
+
+ nps = length(CPD.dom_sz)-1; % num parents
+ CPT = 0*myones(CPD.dom_sz);
+ ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 1);
+ CPT(ndx{:}) = CPD.transprob;
+ if CPD.fullstartprob
+ ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 2);
+ CPT(ndx{:}) = CPD.startprob;
+ else
+ for i=1:CPD.Qsz
+ ndx = mk_multi_index(nps+1, [CPD.Fself_ndx CPD.old_self_ndx], [2 i]);
+ CPT(ndx{:}) = CPD.startprob;
+ end
+ end
+ else % no F from self
+ error('An hhmmQ node without any F parents is just a tabular_CPD')
+ end
+end
+
+CPD = set_fields(CPD, 'CPT', CPT);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmQ_CPD/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a hhmm Q node.
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, idden_bitv)
+%
+% we assume if one of the Qps is observed, all of them are
+% We assume the F nodes are already hidden
+
+% Figure out the node numbers associated with each parent
+dom = fmarginal.domain;
+self = dom(end);
+old_self = dom(CPD.old_self_ndx);
+%Fself = dom(CPD.Fself_ndx);
+%Fbelow = dom(CPD.Fbelow_ndx);
+Qps = dom(CPD.Qps_ndx);
+
+Qsz = CPD.Qsz;
+Qpsz = CPD.Qpsz;
+
+
+% hor_counts(old_self, Qps, self),
+% fmarginal(old_self, Fbelow, Fself, Qps, self)
+% hor_counts(i,k,j) = fmarginal(i,2,1,k,j) % below has finished, self has not
+% ver_counts(i,k,j) = fmarginal(i,2,2,k,j) % below has finished, and so has self (reset)
+% Since any of i,j,k may be observed, we write
+% hor_counts(i_counts_ndx, kndx, jndx) = fmarginal(i_fmarg_ndx...)
+% where i_fmarg_ndx = 1 and i_counts_ndx = i if old_self is observed to have value i,
+% i_fmarg_ndx = 1:Qsz and i_counts_ndx = 1:Qsz if old_self is hidden, etc.
+
+
+if hidden_bitv(old_self)
+ i_counts_ndx = 1:Qsz;
+ eff_oldQsz = Qsz;
+else
+ i_counts_ndx = evidence{old_self};
+ eff_oldQsz = 1;
+end
+
+if all(hidden_bitv(Qps)) % we assume all are hidden or all are observed
+ k_counts_ndx = 1:Qpsz;
+ eff_Qpsz = Qpsz;
+else
+ k_counts_ndx = subv2ind(Qpsz, cat(1, evidence{Qps}));
+ eff_Qpsz = 1;
+end
+
+if hidden_bitv(self)
+ j_counts_ndx = 1:Qsz;
+ eff_Qsz = Qsz;
+else
+ j_counts_ndx = evidence{self};
+ eff_Qsz = 1;
+end
+
+hor_counts = zeros(Qsz, Qpsz, Qsz);
+ver_counts = zeros(Qpsz, Qsz);
+
+if ~isempty(CPD.Fbelow_ndx)
+ if ~isempty(CPD.Fself_ndx) % general case
+ fmarg = myreshape(fmarginal.T, [eff_oldQsz 2 2 eff_Qpsz eff_Qsz]);
+ hor_counts(i_counts_ndx, k_counts_ndx, j_counts_ndx) = fmarg(:, 2, 1, :, :);
+ ver_counts(k_counts_ndx, j_counts_ndx) = ... % sum over Fbelow and Qold
+ sumv(fmarg(:, :, 2, :, :), [1 2]); % require Fself=2
+ else % no F from self, hence no startprob
+ fmarg = myreshape(fmarginal.T, [eff_oldQsz 2 eff_Qpsz eff_Qsz]);
+ hor_counts(i_counts_ndx, k_counts_ndx, j_counts_ndx) = ...
+ fmarg(:, 2, :, :); % require Fbelow = 2
+ end
+else % no F signal from below
+ if ~isempty(CPD.Fself_ndx) % self F
+ fmarg = myreshape(fmarginal.T, [eff_oldQsz 2 eff_Qpsz eff_Qsz]);
+ hor_counts(i_counts_ndx, k_counts_ndx, j_counts_ndx) = fmarg(:, 1, :, :);
+ ver_counts(k_counts_ndx, j_counts_ndx) = ... % sum over Qold
+ squeeze(sum(fmarg(:, 2, :, :), 1)); % Fself=2
+ else % no F from self
+ error('An hhmmQ node without any F parents is just a tabular_CPD')
+ end
+end
+
+
+CPD.sub_CPD_trans = update_ess_simple(CPD.sub_CPD_trans, hor_counts);
+
+if ~isempty(CPD.sub_CPD_start)
+ CPD.sub_CPD_start = update_ess_simple(CPD.sub_CPD_start, ver_counts);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/convert_to_table.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/maximize_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mlp_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/reset_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/update_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@mlp_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/convert_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/convert_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+function T = convert_to_table(CPD, domain, evidence)
+% CONVERT_TO_TABLE Convert a mlp CPD to a table, incorporating any evidence
+% T = convert_to_table(CPD, domain, evidence)
+
+self = domain(end);
+ps = domain(1:end-1); % self' parents
+%cps = myintersect(ps, cnodes); % self' continous parents
+cnodes = domain(CPD.cpndx);
+cps = myintersect(ps, cnodes);
+odom = domain(~isemptycell(evidence(domain))); % obs nodes in the net
+assert(myismember(cps, odom)); % !ALL the CTS parents must be observed!
+ns(cps)=1;
+dps = mysetdiff(ps, cps); % self' discrete parents
+dobs = myintersect(dps, odom); % discrete obs parents
+
+% Extract the params compatible with the observations (if any) on the discrete parents (if any)
+
+if ~isempty(dobs),
+ dvals = cat(1, evidence{dobs});
+ ns_eff= CPD.sizes; % effective node sizes
+ ens=ns_eff;
+ ens(dobs) = 1;
+ S=prod(ens(dps));
+ subs = ind2subv(ens(dps), 1:S);
+ mask = find_equiv_posns(dobs, dps);
+ for i=1:length(mask),
+ subs(:,mask(i)) = dvals(i);
+ end
+ support = subv2ind(ns_eff(dps), subs)';
+else
+ ns_eff= CPD.sizes;
+ support=[1:prod(ns_eff(dps))];
+end
+
+W1=[]; b1=[]; W2=[]; b2=[];
+
+W1 = CPD.W1(:,:,support);
+b1= CPD.b1(support,:);
+W2 = CPD.W2(:,:,support);
+b2= CPD.b2(support,:);
+ns(odom) = 1;
+dpsize = prod(ns(dps)); % overall size of the self' discrete parents
+
+x = cat(1, evidence{cps});
+ndata=size(x,2);
+
+if ~isempty(evidence{self}) %
+ app=struct(CPD); %
+ ns(self)=app.mlp{1}.nout; % pump up self to the original dimension if observed
+ clear app; %
+end %
+
+T =zeros(dpsize, ns(self)); %
+for i=1:dpsize %
+ W1app = W1(:,:,i); %
+ b1app = b1(i,:); %
+ W2app = W2(:,:,i); %
+ b2app = b2(i,:); % for each of the dpsize combinations of self'parents values
+ z = tanh(x(:)'*W1app + ones(ndata, 1)*b1app); % we tabulate the corrisponding glm model
+ a = z*W2app + ones(ndata, 1)*b2app; % (element of the cell array CPD.glim)
+ appoggio = normalise(exp(a)); %
+ T(i,:)=appoggio; %
+ W1app=[]; W2app=[]; b1app=[]; b2app=[]; %
+ z=[]; a=[]; appoggio=[]; %
+end %
+
+if ~isempty(evidence{self})
+ appoggio=[]; %
+ appoggio=zeros(1,ns(self)); %
+ r = evidence{self}; %...if self is observed => in output there's only the probability of the 'true' class
+ for i=1:dpsize %
+ appoggio(i)=T(i,r); %
+ end
+ T=zeros(dpsize,1);
+ for i=1:dpsize
+ T(i,1)=appoggio(i);
+ end
+ clear appoggio;
+ ns(self) = 1;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Find ML params of an MLP using Scaled Conjugated Gradient (SCG)
+% CPD = maximize_params(CPD, temperature)
+% temperature parameter is ignored
+
+if ~adjustable_CPD(CPD), return; end
+options = foptions;
+
+% options(1) >= 0 means print an annoying message when the max. num. iter. is reached
+if CPD.verbose
+ options(1) = 1;
+else
+ options(1) = -1;
+end
+%options(1) = CPD.verbose;
+
+options(2) = CPD.wthresh;
+options(3) = CPD.llthresh;
+options(14) = CPD.max_iter;
+
+dpsz=length(CPD.mlp);
+
+for i=1:dpsz
+ mask=[];
+ mask=find(CPD.eso_weights(:,:,i)>0); % for adapting the parameters we use only positive weighted example
+ if ~isempty(mask),
+ CPD.mlp{i} = netopt_weighted(CPD.mlp{i}, options, CPD.parent_vals(mask',:), CPD.self_vals(mask',:,i), CPD.eso_weights(mask',:,i), 'scg');
+
+ CPD.W1(:,:,i)=CPD.mlp{i}.w1; % update the parameters matrix
+ CPD.b1(i,:)=CPD.mlp{i}.b1; %
+ CPD.W2(:,:,i)=CPD.mlp{i}.w2; % update the parameters matrix
+ CPD.b2(i,:)=CPD.mlp{i}.b2; %
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/mlp_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/mlp_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,139 @@
+function CPD = mlp_CPD(bnet, self, nhidden, w1, b1, w2, b2, clamped, max_iter, verbose, wthresh, llthresh)
+% MLP_CPD Make a CPD from a Multi Layer Perceptron (i.e., feedforward neural network)
+%
+% We use a different MLP for each discrete parent combination (if there are any discrete parents).
+% We currently assume this node (the child) is discrete.
+%
+% CPD = mlp_CPD(bnet, self, nhidden)
+% will create a CPD with random parameters, where self is the number of this node and nhidden the number of the hidden nodes.
+% The params are drawn from N(0, s*I), where s = 1/sqrt(n+1), n = length(X).
+%
+% CPD = mlp_CPD(bnet, self, nhidden, w1, b1, w2, b2) allows you to specify the params, where
+% w1 = first-layer weight matrix
+% b1 = first-layer bias vector
+% w2 = second-layer weight matrix
+% b2 = second-layer bias vector
+% These are assumed to be the same for each discrete parent combination.
+% If any of these are [], random values will be created.
+%
+% CPD = mlp_CPD(bnet, self, nhidden, w1, b1, w2, b2, clamped) allows you to prevent the params from being
+% updated during learning (if clamped = 1). Default: clamped = 0.
+%
+% CPD = mlp_CPD(bnet, self, nhidden, w1, b1, w2, b2, clamped, max_iter, verbose, wthresh, llthresh)
+% alllows you to specify params that control the M step:
+% max_iter - the maximum number of steps to take (default: 10)
+% verbose - controls whether to print (default: 0 means silent).
+% wthresh - a measure of the precision required for the value of
+% the weights W at the solution. Default: 1e-2.
+% llthresh - a measure of the precision required of the objective
+% function (log-likelihood) at the solution. Both this and the previous condition must
+% be satisfied for termination. Default: 1e-2.
+%
+% For learning, we use a weighted version of scaled conjugated gradient in the M step.
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'mlp_CPD', discrete_CPD(0,[]));
+ return;
+elseif isa(bnet, 'mlp_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+assert(myismember(self, bnet.dnodes));
+ns = bnet.node_sizes;
+
+ps = parents(bnet.dag, self);
+dnodes = mysetdiff(1:length(bnet.dag), bnet.cnodes);
+dps = myintersect(ps, dnodes);
+cps = myintersect(ps, bnet.cnodes);
+dpsz = prod(ns(dps));
+cpsz = sum(ns(cps));
+self_size = ns(self);
+
+% discrete/cts parent index - which ones of my parents are discrete/cts?
+CPD.dpndx = find_equiv_posns(dps, ps);
+CPD.cpndx = find_equiv_posns(cps, ps);
+
+CPD.mlp = cell(1,dpsz);
+for i=1:dpsz
+ CPD.mlp{i} = mlp(cpsz, nhidden, self_size, 'softmax');
+ if nargin >=4 & ~isempty(w1)
+ CPD.mlp{i}.w1 = w1;
+ end
+ if nargin >=5 & ~isempty(b1)
+ CPD.mlp{i}.b1 = b1;
+ end
+ if nargin >=6 & ~isempty(w2)
+ CPD.mlp{i}.w2 = w2;
+ end
+ if nargin >=7 & ~isempty(b2)
+ CPD.mlp{i}.b2 = b2;
+ end
+ W1app(:,:,i)=CPD.mlp{i}.w1;
+ W2app(:,:,i)=CPD.mlp{i}.w2;
+ b1app(i,:)=CPD.mlp{i}.b1;
+ b2app(i,:)=CPD.mlp{i}.b2;
+end
+if nargin < 8, clamped = 0; end
+if nargin < 9, max_iter = 10; end
+if nargin < 10, verbose = 0; end
+if nargin < 11, wthresh = 1e-2; end
+if nargin < 12, llthresh = 1e-2; end
+
+CPD.self = self;
+CPD.max_iter = max_iter;
+CPD.verbose = verbose;
+CPD.wthresh = wthresh;
+CPD.llthresh = llthresh;
+
+% sufficient statistics
+% Since MLP is not in the exponential family, we must store all the raw data.
+%
+CPD.W1=W1app; % Extract all the parameters of the node for handling discrete obs parents
+CPD.W2=W2app; %
+nparaW=[size(W1app) size(W2app)]; %
+CPD.b1=b1app; %
+CPD.b2=b2app; %
+nparab=[size(b1app) size(b2app)]; %
+
+CPD.sizes=bnet.node_sizes(:); % used in CPD_to_table to pump up the node sizes
+
+CPD.parent_vals = []; % X(l,:) = value of cts parents in l'th example
+
+CPD.eso_weights=[]; % weights used by the SCG algorithm
+
+CPD.self_vals = []; % Y(l,:) = value of self in l'th example
+
+% For BIC
+CPD.nsamples = 0;
+CPD.nparams=prod(nparaW)+prod(nparab);
+CPD = class(CPD, 'mlp_CPD', discrete_CPD(clamped, ns([ps self])));
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.mlp = {};
+CPD.self = [];
+CPD.max_iter = [];
+CPD.verbose = [];
+CPD.wthresh = [];
+CPD.llthresh = [];
+CPD.approx_hess = [];
+CPD.W1 = [];
+CPD.W2 = [];
+CPD.b1 = [];
+CPD.b2 = [];
+CPD.sizes = [];
+CPD.parent_vals = [];
+CPD.eso_weights=[];
+CPD.self_vals = [];
+CPD.nsamples = [];
+CPD.nparams = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics for a CPD (mlp)
+% CPD = reset_ess(CPD)
+
+CPD.W1 = [];
+CPD.W2 = [];
+CPD.b1 = [];
+CPD.b2 = [];
+CPD.parent_vals = [];
+CPD.eso_weights=[];
+CPD.self_vals = [];
+CPD.nsamples = 0;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@mlp_CPD/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,131 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a CPD (MLP)
+% CPD = update_ess(CPD, family_marginal, evidence, node_sizes, cnodes, hidden_bitv)
+%
+% fmarginal = overall posterior distribution of self and its parents
+% fmarginal(i1,i2...,ik,s)=prob(Pa1=i1,...,Pak=ik, self=s| X)
+%
+% => 1) prob(self|Pa1,...,Pak)=fmarginal/prob(Pa1,...,Pak) with prob(Pa1,...,Pak)=sum{s,fmarginal}
+% [self estimation -> CPD.self_vals]
+% 2) prob(Pa1,...,Pak) [SCG weights -> CPD.eso_weights]
+%
+% Hidden_bitv is ignored
+
+% Written by Pierpaolo Brutti
+
+if ~adjustable_CPD(CPD), return; end
+
+dom = fmarginal.domain;
+cdom = myintersect(dom, cnodes);
+assert(~any(isemptycell(evidence(cdom))));
+ns(cdom)=1;
+
+self = dom(end);
+ps=dom(1:end-1);
+dpdom=mysetdiff(ps,cdom);
+
+dnodes = mysetdiff(1:length(ns), cnodes);
+
+ddom = myintersect(ps, dnodes); %
+if isempty(evidence{self}), % if self is hidden in what follow we must
+ ddom = myintersect(dom, dnodes); % consider its dimension
+end %
+
+odom = dom(~isemptycell(evidence(dom)));
+hdom = dom(isemptycell(evidence(dom))); % hidden parents in domain
+
+dobs = myintersect(ddom, odom);
+dvals = cat(1, evidence{dobs});
+ens = ns; % effective node sizes
+ens(dobs) = 1;
+
+dpsz=prod(ns(dpdom));
+S=prod(ens(ddom));
+subs = ind2subv(ens(ddom), 1:S);
+mask = find_equiv_posns(dobs, ddom);
+for i=1:length(mask),
+ subs(:,mask(i)) = dvals(i);
+end
+supportedQs = subv2ind(ns(ddom), subs);
+
+Qarity = prod(ns(ddom));
+if isempty(ddom),
+ Qarity = 1;
+end
+fullm.T = zeros(Qarity, 1);
+fullm.T(supportedQs) = fmarginal.T(:);
+
+% For dynamic (recurrent) net-------------------------------------------------------------
+% ----------------------------------------------------------------------------------------
+high=size(evidence,1); % slice height
+ss_ns=ns(1:high); % single slice nodes sizes
+pos=self; %
+slice_num=0; %
+while pos>high, %
+ slice_num=slice_num+1; % find active slice
+ pos=pos-high; % pos=self posistion into a single slice
+end %
+
+last_dim=pos-1; %
+if isempty(evidence{self}), %
+ last_dim=pos; %
+end % last_dim=last reshaping dimension
+reg=dom-slice_num*high;
+dex=myintersect(reg(find(reg>=0)), [1:last_dim]); %
+rs_dim=ss_ns(dex); % reshaping dimensions
+
+if slice_num>0,
+ act_slice=[]; past_ancest=[]; %
+ act_slice=slice_num*high+[1:high]; % recover the active slice nodes
+ % past_ancest=mysetdiff(ddom, act_slice);
+ past_ancest=mysetdiff(ps, act_slice); % recover ancestors contained into past slices
+ app=ns(past_ancest);
+ rs_dim=[app(:)' rs_dim(:)']; %
+end %
+if length(rs_dim)==1, rs_dim=[1 rs_dim]; end %
+if size(rs_dim,1)~=1, rs_dim=rs_dim'; end %
+
+fullm.T=reshape(fullm.T, rs_dim); % reshaping the marginal
+
+% ----------------------------------------------------------------------------------------
+% ----------------------------------------------------------------------------------------
+
+% X = cts parent, R = discrete self
+
+% 1) observations vector -> CPD.parents_vals -------------------------------------------------
+x = cat(1, evidence{cdom});
+
+% 2) weights vector -> CPD.eso_weights -------------------------------------------------------
+if isempty(evidence{self}) % R is hidden
+ sum_over=length(rs_dim);
+ app=sum(fullm.T, sum_over);
+ pesi=reshape(app,[dpsz,1]);
+ clear app;
+else
+ pesi=reshape(fullm.T,[dpsz,1]);
+end
+
+assert(approxeq(sum(pesi),1));
+
+% 3) estimate (if R is hidden) or recover (if R is obs) self'value----------------------------
+if isempty(evidence{self}) % R is hidden
+ app=mk_stochastic(fullm.T); % P(self|Pa1,...,Pak)=fmarginal/prob(Pa1,...,Pak)
+ app=reshape(app,[dpsz ns(self)]); % matrix size: prod{j,ns(Paj)} x ns(self)
+ r=app;
+ clear app;
+else
+ r = zeros(dpsz,ns(self));
+ for i=1:dpsz
+ if pesi(i)~=0, r(i,evidence{self}) = 1; end
+ end
+end
+for i=1:dpsz
+ if pesi(i) ~=0, assert(approxeq(sum(r(i,:)),1)); end
+end
+
+CPD.nsamples = CPD.nsamples + 1;
+CPD.parent_vals(CPD.nsamples,:) = x(:)';
+for i=1:dpsz
+ CPD.eso_weights(CPD.nsamples,:,i)=pesi(i);
+ CPD.self_vals(CPD.nsamples,:,i) = r(i,:);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CPD_to_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CPD_to_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function CPT = CPD_to_CPT(CPD)
+% CPD_TO_CPT Convert the discrete CPD to tabular form (noisyor)
+% CPT = CPD_to_CPT(CPD)
+%
+% CPT(U1,...,Un, X) = Pr(X|U1,...,Un) where the Us are the parents (excluding leak).
+
+if ~isempty(CPD.CPT)
+ CPT = CPD.CPT; % remember to flush cache if params change (e.g., during learning)
+ return;
+end
+
+q = [CPD.leak_inhibit CPD.inhibit(:)'];
+% q(i) is the prob. that the i'th parent will be inhibited (flipped from 1 to 0).
+% q(1) is the leak inhibition probability, and length(q) = n + 1.
+
+if length(q)==1
+ CPT = [q 1-q];
+ return;
+end
+
+n = length(q);
+Bn = ind2subv(2*ones(1,n), 1:(2^n))-1; % all n bit vectors, with the left most column toggling fastest (LSB)
+CPT = zeros(2^n, 2);
+% Pr(X=0 | U_1 .. U_n) = prod_{i: U_i = on} q_i = prod_i q_i ^ U_i = exp(u' * log(q_i))
+% This method is problematic when q contains zeros
+
+Q = repmat(q(:)', 2^n, 1);
+Q(logical(~Bn)) = 1;
+CPT(:,1) = prod(Q,2);
+CPT(:,2) = 1-CPT(:,1);
+
+CPT = reshape(CPT(2:2:end), 2*ones(1,n)); % skip cases in which the leak is off
+
+CPD.CPT = CPT;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CPD_to_lambda_msg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CPD_to_lambda_msg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
+% CPD_TO_LAMBDA_MSG Compute lambda message (noisyor)
+% lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p)
+% Pearl p190 top eqn
+
+switch msg_type
+ case 'd',
+ l0 = msg{n}.lambda(1);
+ l1 = msg{n}.lambda(2);
+ Pi = sum_prod_CPD_and_pi_msgs(CPD, n, ps, msg, p);
+ i = find(p==ps); % p is n's i'th parent
+ q = CPD.inhibit(i);
+ lam_msg = zeros(2,1);
+ for u=0:1
+ lam_msg(u+1) = l1 - (q^u)*(l1 - l0)*Pi;
+ end
+ case 'g',
+ error('noisyor_CPD can''t create Gaussian msgs')
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CPD_to_pi.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CPD_to_pi.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+% CPD_TO_PI Compute pi vector (noisyor)
+% pi = CPD_to_pi(CPD, msg_type, n, ps, msg)
+% Pearl p188 eqn 4.57
+
+switch msg_type
+ case 'd',
+ pi = sum_prod_CPD_and_pi_msgs(CPD, n, ps, msg);
+ pi = [pi 1-pi]';
+ case 'g',
+ error('can''t convert noisy-or CPD to Gaussian pi')
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/CPD_to_CPT.m/1.1.1.1/Mon Aug 2 22:23:32 2004//
+/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/noisyor_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@noisyor_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/noisyor_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/noisyor_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,79 @@
+function CPD = noisyor_CPD(bnet, self, leak_inhibit, inhibit)
+% NOISYOR_CPD Make a noisy-or CPD
+% CPD = NOISYOR_CPD(BNET, NODE_NUM, LEAK_INHIBIT, INHIBIT)
+%
+% A noisy-or node turns on if any of its parents are on, provided they are not inhibited.
+% The prob. that the i'th parent gets inhibited (flipped from 1 to 0) is inhibit(i).
+% The prob that the leak node (a dummy parent that is always on) gets inhibit is leak_inhibit.
+% These params default to random values if omitted.
+%
+% Example: suppose C has parents A and B, and the
+% link of A->C fails with prob pA and the link B->C fails with pB.
+% Then the noisy-OR gate defines the following distribution
+%
+% A B P(C=0)
+% 0 0 1.0
+% 1 0 pA
+% 0 1 pB
+% 1 1 pA * PB
+%
+% Currently, learning is not supported for noisy-or nodes
+% (since the M step is somewhat complicated).
+%
+% For simple generalizations of the noisy-OR model, see e.g.,
+% - Srinivas, "A generalization of the noisy-OR model", UAI 93
+% - Meek and Heckerman, "Learning Causal interaction models", UAI 97.
+
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'noisyor_CPD', discrete_CPD(1, []));
+ return;
+elseif isa(bnet, 'noisyor_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+
+ps = parents(bnet.dag, self);
+fam = [ps self];
+ns = bnet.node_sizes;
+assert(all(ns(fam)==2));
+assert(isempty(myintersect(fam, bnet.cnodes)));
+
+if nargin < 3, leak_inhibit = rand(1, 1); end
+if nargin < 4, inhibit = rand(1, length(ps)); end
+
+CPD.self = self;
+CPD.inhibit = inhibit;
+CPD.leak_inhibit = leak_inhibit;
+
+
+% For BIC
+CPD.nparams = 0;
+CPD.nsamples = 0;
+
+CPD.CPT = []; % cached copy, to speed up CPD_to_CPT
+
+clamped = 1;
+CPD = class(CPD, 'noisyor_CPD', discrete_CPD(clamped, ns([ps self])));
+
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.inhibit = [];
+CPD.leak_inhibit = [];
+CPD.nparams = [];
+CPD.nsamples = [];
+CPD.CPT = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/sum_prod_CPD_and_pi_msgs.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@noisyor_CPD/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/sum_prod_CPD_and_pi_msgs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@noisyor_CPD/private/sum_prod_CPD_and_pi_msgs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function pi = sum_prod_CPD_and_pi_msgs(CPD, n, ps, msg, except)
+% SUM_PROD_CPD_AND_PI_MSGS Compute pi = sum_{u\p} P(n|u) prod_{ui in ps\p} pi_msg(ui->n)
+% pi = sum_prod_CPD_and_pi_msgs(CPD, n, ps, msg, p)
+%
+% pi = prod_i (qi pi_msg(ui->n) + 1 - pi_msg(ui->n)) = prod_i (1 - ci pi_msg(ui->n))
+% is the product of the endorsement withheld (Pearl p188 eqn 4.56)
+% We skip p from this product, if specified.
+
+if nargin < 5, except = -1; end
+pi = 1;
+for i=1:length(ps)
+ p = ps(i);
+ if p ~= except
+ pi_from_parent = msg{n}.pi_from_parent{i};
+ q = CPD.inhibit(i);
+ c = 1-q;
+ pi = pi * (1 - c*pi_from_parent(2));
+ end
+end
+% The pi msg that a leak node sends to its child is [0 1]
+% since its own pi is [0 1] and its lambda to self is [0 1].
+q = CPD.leak_inhibit;
+% 1 - c*pi_from_parent = 1-c*1 = q
+pi = pi * q;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CPD_to_pi.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CPD_to_pi.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+% CPD_TO_PI Compute the pi vector (root)
+% function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence)
+
+self_ev = evidence{n};
+switch msg_type
+ case 'd',
+ error('root_CPD can''t create discrete msgs')
+ case 'g',
+ pi.mu = self_ev;
+ pi.Sigma = zeros(size(self_ev));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/log_marg_prob_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/log_prob_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/root_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@root_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CPD_to_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CPD_to_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function CPT = CPD_to_CPT(CPD)
+% CPD_TO_CPT Convert the CPD to tabular form (root)
+% CPT = CPD_to_CPT(CPD)
+
+CPT = 1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/CPD_to_CPT.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@root_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a root CPD to one or more potentials
+% pots = convert_to_pot(CPD, pot_type, domain, evidence)
+
+assert(length(domain)==1);
+assert(~isempty(evidence(domain)));
+T = 1;
+
+sz = CPD.sizes;
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+switch pot_type
+ case 'u',
+ pot = upot(domain, 1, T, 0);
+ case 'd',
+ ns(domain) = 1;
+ pot = dpot(domain, ns(domain), T);
+ case {'c','g'},
+ ns(domain) = 0;
+ pot = cpot(domain, ns(domain), 0);
+ case 'cg',
+ ddom = [];
+ cdom = domain; % we assume the root node is cts
+ %pot = cgpot(ddom, cdom, ns, {cpot([],[],0)});
+ pot = cgpot(ddom, cdom, ns);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/log_marg_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/log_marg_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function L = log_marg_prob_node(CPD, self_ev, pev)
+% LOG_MARG_PROB_NODE Compute prod_m log int_{theta_i} P(x(i,m)| x(pi_i,m), theta_i) for node i (root)
+% L = log_marg_prob_node(CPD, self_ev, pev)
+%
+% self_ev{m} is the evidence on this node in case m
+% pev{i,m} is the evidence on the i'th parent in case m (ignored)
+% We always return L = 0.
+
+L = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/log_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/log_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function L = log_prob_node(CPD, self_ev, pev)
+% LOG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m), theta_i) for node i (root)
+% L = log_prob_node(CPD, self_ev, pev)
+%
+% self_ev{m} is the evidence on this node in case m
+% pev{i,m} is the evidence on the i'th parent in case m (ignored)
+% We always return L = 0.
+
+L = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/root_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/root_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function CPD = root_CPD(bnet, self, val)
+% ROOT_CPD Make a conditional prob. distrib. which has no parameters.
+% CPD = ROOT_CPD(BNET, NODE_NUM, VAL)
+%
+% The node must not have any parents and is assumed to always be observed.
+% It is a way of modelling exogenous inputs to a model.
+% VAL is the value to which the root is clamped (default: [])
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'root_CPD', generic_CPD(1));
+ return;
+elseif isa(bnet, 'root_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+
+if nargin < 3, val = []; end
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+if ~isempty(ps)
+ error('root CPDs should have no parents')
+end
+
+CPD.self = self;
+CPD.val = val;
+CPD.sizes = ns(self);
+
+clamped = 1;
+CPD = class(CPD, 'root_CPD', generic_CPD(clamped));
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.val = [];
+CPD.sizes = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/sample_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@root_CPD/sample_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function y = sample_node(CPD, pev)
+% SAMPLE_NODE Draw a random sample from P(Y|pa(y), theta) (root)
+% Y = SAMPLE_NODE(CPD, PEV)
+%
+% pev{i} is the evidence on the i'th parent.
+% Since a root has no parents, we ignore pev,
+% and return the value the root was clamped to when it was created.
+
+y = CPD.val;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/convert_to_table.m/1.1.1.1/Tue Mar 30 17:19:22 2004//
+/display.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/get_field.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/maximize_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/reset_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/softmax_CPD.m/1.1.1.1/Tue Jan 7 16:25:14 2003//
+/update_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@softmax_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a softmax CPD to a potential
+% pots = convert_to_pot(CPD, pot_type, domain, evidence)
+%
+% pots = CPD evaluated using evidence(domain)
+
+ncases = size(domain,2);
+assert(ncases==1); % not yet vectorized
+
+sz = dom_sizes(CPD);
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+odom = domain(~isemptycell(evidence(domain)));
+T = convert_to_table(CPD, domain, evidence);
+
+switch pot_type
+ case 'u',
+ pot = upot(domain, sz, T, 0*myones(sz));
+ case 'd',
+ ns(odom) = 1;
+ pot = dpot(domain, ns(domain), T);
+
+ case {'c','g'},
+ % Since we want the output to be a Gaussian, the whole family must be observed.
+ % In other words, the potential is really just a constant.
+ p = T;
+ %p = prob_node(CPD, evidence(domain(end)), evidence(domain(1:end-1)));
+ ns(domain) = 0;
+ pot = cpot(domain, ns(domain), log(p));
+
+ case 'cg',
+ T = T(:);
+ ns(odom) = 1;
+ can = cell(1, length(T));
+ for i=1:length(T)
+ can{i} = cpot([], [], log(T(i)));
+ end
+ ps = domain(1:end-1);
+ dps = ps(CPD.dpndx);
+ cps = ps(CPD.cpndx);
+ ddom = [dps CPD.self];
+ cdom = cps;
+ pot = cgpot(ddom, cdom, ns, can);
+
+ case 'scg'
+ T = T(:);
+ ns(odom) = 1;
+ pot_array = cell(1, length(T));
+ for i=1:length(T)
+ pot_array{i} = scgcpot([], [], T(i));
+ end
+ pot = scgpot(domain, [], [], ns, pot_array);
+
+ otherwise,
+ error(['unrecognized pot type ' pot_type])
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/convert_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/convert_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function T = convert_to_table(CPD, domain, evidence)
+% CONVERT_TO_TABLE Convert a softmax CPD to a table, incorporating any evidence
+% T = convert_to_table(CPD, domain, evidence)
+
+self = domain(end);
+ps = domain(1:end-1);
+cnodes = domain(CPD.cpndx);
+cps = myintersect(ps, cnodes);
+dps = domain(CPD.dpndx);
+dps_as_cps = domain(CPD.dps_as_cps.ndx);
+all_dps = union(dps,dps_as_cps);
+odom = domain(~isemptycell(evidence(domain)));
+if ~isempty(cps), assert(myismember(cps, odom)); end % all cts parents must be observed
+
+ns = zeros(1, max(domain));
+ns(domain) = CPD.sizes;
+ens = ns; % effective node sizes
+ens(odom) = 1;
+
+% dpsize >= glimsz because the glm parameters are tied across the dps_as_cps parents
+dpsize = prod(ens(all_dps)); % size of ALL self'discrete parents
+dpvals = cat(1, evidence{myintersect(all_dps, odom)});
+cpvals = cat(1, evidence{cps});
+if ~isempty(dps_as_cps),
+ separator = CPD.dps_as_cps.separator;
+ dp_as_cpmap = find_equiv_posns(dps_as_cps, all_dps);
+ dops_map = find_equiv_posns(myintersect(all_dps, odom), all_dps);
+ puredp_map = find_equiv_posns(dps, all_dps);
+ subs = ind2subv(ens(all_dps), 1:prod(ens(all_dps)));
+ if ~isempty(dops_map), subs(:,dops_map) = subs(:,dops_map)+repmat(dpvals(:)',[size(subs,1) 1])-1; end
+end
+
+[w,b] = extract_params(CPD);
+T = zeros(dpsize, ns(self));
+for i=1:dpsize,
+ active_glm = i;
+ dp_as_cpvals=zeros(1,sum(ns(dps_as_cps)));
+ if ~isempty(dps_as_cps),
+ active_glm = max([1,subv2ind(ns(dps), subs(i,puredp_map))]);
+ % Extract the params compatible with the observations (if any) on the 'pure' discrete parents (if any)
+ where_one = separator + subs(i,dp_as_cpmap);
+ % and get in the dp_as_cp parents...
+ dp_as_cpvals(where_one)=1;
+ end
+ T(i,:) = normalise(exp([dp_as_cpvals(:); cpvals(:)]'*w(:,:,active_glm) + b(:,active_glm)'));
+end
+if myismember(self, odom)
+ r = evidence{self};
+ T = T(:,r);
+end
+
+T = myreshape(T, ens(domain));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(CPD)
+
+disp('softmax_CPD object');
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function val = get_params(CPD, name)
+% GET_PARAMS Get the parameters (fields) for a softmax_CPD object
+% val = get_params(CPD, name)
+%
+% The following fields can be accessed
+%
+% weights - W(X,Y,Q)
+% offset - b(Y,Q)
+%
+% e.g., W = get_params(CPD, 'weights')
+
+[W, b] = extract_params(CPD);
+switch name
+ case 'weights', val = W;
+ case 'offset', val = b;
+ otherwise,
+ error(['invalid argument name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (dsoftmax) using IRLS
+% CPD = maximize_params(CPD, temperature)
+% temperature parameter is ignored
+
+% Written by Pierpaolo Brutti
+
+if ~adjustable_CPD(CPD), return; end
+options = foptions;
+
+if CPD.verbose
+ options(1) = 1;
+else
+ options(1) = -1;
+end
+%options(1) = CPD.verbose;
+
+options(2) = CPD.wthresh;
+options(3) = CPD.llthresh;
+options(5) = CPD.approx_hess;
+options(14) = CPD.max_iter;
+
+dpsize = size(CPD.self_vals,3);
+for i=1:dpsize,
+ mask=find(CPD.eso_weights(:,:,i)>0); % for adapting the parameters we use only positive weighted example
+ if ~isempty(mask),
+ if ~isempty(CPD.dps_as_cps.ndx),
+ puredp_map = find_equiv_posns(CPD.dpndx, union(CPD.dpndx, CPD.dps_as_cps.ndx)); % find the glm structure
+ subs = ind2subv(CPD.sizes(union(CPD.dpndx, CPD.dps_as_cps.ndx)),i); % that corrisponds to the
+ active_glm = max([1,subv2ind(CPD.sizes(CPD.dpndx), subs(puredp_map))]); % i-th 'fictitious' example
+
+ CPD.glim{active_glm} = netopt_weighted(CPD.glim{active_glm}, options, CPD.parent_vals(mask',:,i),...
+ CPD.self_vals(mask',:,i), CPD.eso_weights(mask',:,i), 'scg');
+ else
+ alfa = 0.4; if CPD.solo, alfa = 1; end % learning step = 1 <=> self is all alone in the net
+ CPD.glim{i} = glmtrain_weighted(CPD.glim{i}, options, CPD.parent_vals(mask',:),...
+ CPD.self_vals(mask',:,i), CPD.eso_weights(mask',:,i), alfa);
+ end
+ end
+ mask=[];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/extract_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@softmax_CPD/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/extract_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/private/extract_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function [W, b] = extract_params(CPD)
+
+% W(X,Y,Q), b(Y,Q) where Y = ns(self), X = ns(cps), Q = prod(ns(dps))
+
+glimsz = prod(CPD.sizes(CPD.dpndx));
+ss = CPD.sizes(end);
+cpsz = sum(CPD.sizes(CPD.cpndx));
+dp_as_cpsz = sum(CPD.sizes(CPD.dps_as_cps.ndx));
+W = zeros(dp_as_cpsz + cpsz, ss, glimsz);
+b = zeros(ss, glimsz);
+
+for i=1:glimsz
+ W(:,:,i) = CPD.glim{i}.w1;
+ b(:,i) = CPD.glim{i}.b1(:);
+end
+
+W = myreshape(W, [dp_as_cpsz + cpsz ss CPD.sizes(CPD.dpndx)]);
+b = myreshape(b, [ss CPD.sizes(CPD.dpndx)]);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics for a CPD (dsoftmax)
+% CPD = reset_ess(CPD)
+
+CPD.parent_vals = [];
+CPD.eso_weights=[];
+CPD.self_vals = [];
+CPD.nsamples = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/sample_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/sample_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function y = sample_node(CPD, pvals)
+% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (discrete)
+% y = sample_node(CPD, parent_evidence)
+%
+% parent_evidence{i} is the value of the i'th parent
+
+n = length(pvals)+1;
+dom = 1:n;
+%evidence = cell(1,n);
+%evidence(1:n-1) = pvals(:)';
+evidence = pvals;
+evidence{end+1} = [];
+T = convert_to_table(CPD, dom, evidence);
+y = sample_discrete(T);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function CPD = set_params(CPD, varargin)
+% SET_PARAMS Set the parameters (fields) for a softmax_CPD object
+% CPD = set_params(CPD, name/value pairs)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y), Q1=ns(dps(1)), Q2=ns(dps(2)), ...
+% where dps are the discrete parents; if there are no discrete parents, we set Q1=1.)
+%
+% weights - (W(:,j,a,b,...) - W(:,j',a,b,...)) is ppn to dec. boundary
+% between j,j' given Q1=a,Q2=b,... [ randn(X,Y,Q1,Q2,...) ]
+% offset - (offset(j,a,b,...) - offset(j',a,b,...)) is the offset to dec. boundary
+% between j,j' given Q1=a,Q2=b,... [ randn(Y,Q1,Q2,...) ]
+% clamped - 'yes' means don't adjust params during learning ['no']
+% max_iter - the maximum number of steps to take [10]
+% verbose - 'yes' means print the LL at each step of IRLS ['no']
+% wthresh - convergence threshold for weights [1e-2]
+% llthresh - convergence threshold for log likelihood [1e-2]
+% approx_hess - 'yes' means approximate the Hessian for speed ['no']
+%
+% e.g., CPD = set_params(CPD,'offset', zeros(ns(i),1));
+
+args = varargin;
+nargs = length(args);
+glimsz = prod(CPD.sizes(CPD.dpndx));
+for i=1:2:nargs
+ switch args{i},
+ case 'discrete', str='nothing to do';
+ case 'clamped', CPD = set_clamped(CPD, strcmp(args{i+1}, 'yes'));
+ case 'max_iter', CPD.max_iter = args{i+1};
+ case 'verbose', CPD.verbose = strcmp(args{i+1}, 'yes');
+ case 'max_iter', CPD.max_iter = args{i+1};
+ case 'wthresh', CPD.wthresh = args{i+1};
+ case 'llthresh', CPD.llthresh = args{i+1};
+ case 'approx_hess', CPD.approx_hess = strcmp(args{i+1}, 'yes');
+ case 'weights', for q=1:glimsz, CPD.glim{q}.w1 = args{i+1}(:,:,q); end;
+ case 'offset',
+ if glimsz == 1
+ CPD.glim{1}.b1 = args{i+1};
+ else
+ for q=1:glimsz, CPD.glim{q}.b1 = args{i+1}(:,q); end;
+ end
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/softmax_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/softmax_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,187 @@
+function CPD = softmax_CPD(bnet, self, varargin)
+% SOFTMAX_CPD Make a softmax (multinomial logit) CPD
+%
+% To define this CPD precisely, let W be an (m x n) matrix with W(i,:) = {i-th row of B}
+% => we can define the following vectorial function:
+%
+% softmax: R^n |--> R^m
+% softmax(z,i-th)=exp(W(i,:)*z)/sum_k(exp(W(k,:)*z))
+%
+% (this constructor augments z with a one at the beginning to introduce an offset term (=bias, intercept))
+% Now call the continuous (cts) and always observed (obs) parents X,
+% the discrete parents (if any) Q, and this node Y then we use the discrete parent(s) just to index
+% the parameter vectors (c.f., conditional Gaussian nodes); that is:
+% prob(Y=i | X=x, Q=j) = softmax(x,i-th|j)
+% where '|j' means that we are using the j-th (m x n) parameters matrix W(:,:,j).
+% If there are no discrete parents, this is a regular softmax node.
+% If Y is binary, this is a logistic (sigmoid) function.
+%
+% CPD = softmax_CPD(bnet, node_num, ...) will create a softmax CPD with random parameters,
+% where node is the number of a node in this equivalence class.
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y), Q1=ns(dps(1)), Q2=ns(dps(2)), ...
+% where dps are the discrete parents; if there are no discrete parents, we set Q1=1.)
+%
+% discrete - the discrete parents that we want to treat like the cts ones [ [] ].
+% This can be used to define sigmoid belief network - see below the reference.
+% For example suppose that Y has one cts parents X and two discrete ones: Q, C1 where:
+% -> Q is binary (1/2) and used just to index the parameters of 'self'
+% -> C1 is ternary (1/2/3) and treated as a cts node <=> its values appear into the linear
+% part of the softmax function
+% then:
+% prob(Y|X=x, Q=q, C1=c1)= softmax(W(:,:,q)' * y)
+% where y = [1 | delta(C1,1) delta(C1,2) delta(C1,3) | x(:)']' and delta(Y,a)=indicator(Y=a).
+% weights - (w(:,j,a,b,...) - w(:,j',a,b,...)) is ppn to dec. boundary
+% between j,j' given Q1=a,Q2=b,... [ randn(X,Y,Q1,Q2,...) ]
+% offset - (b(j,a,b,...) - b(j',a,b,...)) is the offset to dec. boundary
+% between j,j' given Q1=a,Q2=b,... [ randn(Y,Q1,Q2,...) ]
+%
+% e.g., CPD = softmax_CPD(bnet, i, 'offset', zeros(ns(i),1));
+%
+% The following fields control the behavior of the M step, which uses
+% a weighted version of the Iteratively Reweighted Least Squares (WIRLS) if dps_as_cps=[]; or
+% a weighted SCG otherwise, as implemented in Netlab, and modified by Pierpaolo Brutti.
+%
+% clamped - 'yes' means don't adjust params during learning ['no']
+% max_iter - the maximum number of steps to take [10]
+% verbose - 'yes' means print the LL at each step of IRLS ['no']
+% wthresh - convergence threshold for weights [1e-2]
+% llthresh - convergence threshold for log likelihood [1e-2]
+% approx_hess - 'yes' means approximate the Hessian for speed ['no']
+%
+% For backwards compatibility with BNT2, you can also specify the parameters in the following order
+% softmax_CPD(bnet, self, w, b, clamped, max_iter, verbose, wthresh, llthresh, approx_hess)
+%
+% REFERENCE
+% For details on the sigmoid belief nets, see:
+% - Neal (1992). Connectionist learning of belief networks, Artificial Intelligence, 56, 71-113.
+% - Saul, Jakkola, Jordan (1996). Mean field theory for sigmoid belief networks, Journal of Artificial Intelligence Reseach (4), pagg. 61-76.
+%
+% For details on the M step, see:
+% - K. Chen, L. Xu, H. Chi (1999). Improved learning algorithms for mixtures of experts in multiclass
+% classification. Neural Networks 12, pp. 1229-1252.
+% - M.I. Jordan, R.A. Jacobs (1994). Hierarchical Mixtures of Experts and the EM algorithm.
+% Neural Computation 6, pp. 181-214.
+% - S.R. Waterhouse, A.J. Robinson (1994). Classification Using Hierarchical Mixtures of Experts. In Proc. IEEE
+% Workshop on Neural Network for Signal Processing IV, pp. 177-186
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'softmax_CPD', discrete_CPD(0, []));
+ return;
+elseif isa(bnet, 'softmax_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+assert(myismember(self, bnet.dnodes));
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+dps = myintersect(ps, bnet.dnodes);
+cps = myintersect(ps, bnet.cnodes);
+
+clamped = 0;
+CPD = class(CPD, 'softmax_CPD', discrete_CPD(clamped, ns([ps self])));
+
+dps_as_cpssz = 0;
+dps_as_cps = [];
+% determine if any discrete parents are to be treated as cts
+if nargin >= 3 & isstr(varargin{1}) % might have passed in 'discrete'
+ for i=1:2:length(varargin)
+ if strcmp(varargin{i}, 'discrete')
+ dps_as_cps = varargin{i+1};
+ assert(myismember(dps_as_cps, dps));
+ dps = mysetdiff(dps, dps_as_cps); % put out the dps treated as cts
+ CPD.dps_as_cps.ndx = find_equiv_posns(dps_as_cps, ps);
+ CPD.dps_as_cps.separator = [0 cumsum(ns(dps_as_cps(1:end-1)))]; % concatenated dps_as_cps dims separators
+ dps_as_cpssz = sum(ns(dps_as_cps));
+ break;
+ end
+ end
+end
+assert(~isempty(union(cps, dps_as_cps))); % It have to be at least a cts or a dps_as_cps parents
+self_size = ns(self);
+cpsz = sum(ns(cps));
+glimsz = prod(ns(dps));
+CPD.dpndx = find_equiv_posns(dps, ps); % it contains only the indeces of the 'pure' dps
+CPD.cpndx = find_equiv_posns(cps, ps);
+
+CPD.self = self;
+CPD.solo = (length(ns)<=2);
+CPD.sizes = bnet.node_sizes([ps self]);
+
+% set default params
+CPD.max_iter = 10;
+CPD.verbose = 0;
+CPD.wthresh = 1e-2;
+CPD.llthresh = 1e-2;
+CPD.approx_hess = 0;
+CPD.glim = cell(1,glimsz);
+for i=1:glimsz
+ CPD.glim{i} = glm(dps_as_cpssz + cpsz, self_size, 'softmax');
+end
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1})
+ % softmax_CPD(bnet, self, w, b, clamped, max_iter, verbose, wthresh, llthresh, approx_hess)
+ if nargs >= 1 & ~isempty(args{1}), CPD = set_fields(CPD, 'weights', args{1}); end
+ if nargs >= 2 & ~isempty(args{2}), CPD = set_fields(CPD, 'offset', args{2}); end
+ if nargs >= 3 & ~isempty(args{3}), CPD = set_clamped(CPD, args{3}); end
+ if nargs >= 4 & ~isempty(args{4}), CPD.max_iter = args{4}; end
+ if nargs >= 5 & ~isempty(args{5}), CPD.verbose = args{5}; end
+ if nargs >= 6 & ~isempty(args{6}), CPD.wthresh = args{6}; end
+ if nargs >= 7 & ~isempty(args{7}), CPD.llthresh = args{7}; end
+ if nargs >= 8 & ~isempty(args{8}), CPD.approx_hess = args{8}; end
+ else
+ CPD = set_fields(CPD, args{:});
+ end
+end
+
+% sufficient statistics
+% Since dsoftmax is not in the exponential family, we must store all the raw data.
+CPD.parent_vals = []; % X(l,:) = value of cts parents in l'th example
+CPD.self_vals = []; % Y(l,:) = value of self in l'th example
+
+CPD.eso_weights=[]; % weights used by the WIRLS algorithm
+
+% For BIC
+CPD.nsamples = 0;
+if ~adjustable_CPD(CPD),
+ CPD.nparams=0;
+else
+ [W, b] = extract_params(CPD);
+ CPD.nparams= prod(size(W)) + prod(size(b));
+end
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.glim = {};
+CPD.self = [];
+CPD.solo = [];
+CPD.max_iter = [];
+CPD.verbose = [];
+CPD.wthresh = [];
+CPD.llthresh = [];
+CPD.approx_hess = [];
+CPD.sizes = [];
+CPD.parent_vals = [];
+CPD.eso_weights=[];
+CPD.self_vals = [];
+CPD.nsamples = [];
+CPD.nparams = [];
+CPD.dpndx = [];
+CPD.cpndx = [];
+CPD.dps_as_cps.ndx = [];
+CPD.dps_as_cps.separator = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,97 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a softmax node
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+%
+% fmarginal = overall posterior distribution of self and its parents
+% fmarginal(i1,i2...,ik,s)=prob(Pa1=i1,...,Pak=ik, self=s| X)
+%
+% => 1) prob(self|Pa1,...,Pak)=fmarginal/prob(Pa1,...,Pak) with prob(Pa1,...,Pak)=sum{s,fmarginal}
+% [self estimation -> CPD.self_vals]
+% 2) prob(Pa1,...,Pak) [WIRLS weights -> CPD.eso_weights]
+%
+% Hidden_bitv is ignored
+
+% Written by Pierpaolo Brutti
+
+if ~adjustable_CPD(CPD), return; end
+
+domain = fmarginal.domain;
+self = domain(end);
+ps = domain(1:end-1);
+cnodes = domain(CPD.cpndx);
+cps = myintersect(domain, cnodes);
+dps = mysetdiff(ps, cps);
+dn_use = dps;
+if isempty(evidence{self}) dn_use = [dn_use self]; end % if self is hidden we must consider its dimension
+dps_as_cps = domain(CPD.dps_as_cps.ndx);
+odom = domain(~isemptycell(evidence(domain)));
+
+ns = zeros(1, max(domain));
+ns(domain) = CPD.sizes; % CPD.sizes = bnet.node_sizes([ps self]);
+ens = ns; % effective node sizes
+ens(odom) = 1;
+dpsize = prod(ns(dps));
+
+% Extract the params compatible with the observations (if any) on the discrete parents (if any)
+dops = myintersect(dps, odom);
+dpvals = cat(1, evidence{dops});
+
+subs = ind2subv(ens(dn_use), 1:prod(ens(dn_use)));
+dpmap = find_equiv_posns(dops, dn_use);
+if ~isempty(dpmap), subs(:,dpmap) = subs(:,dpmap)+repmat(dpvals(:)',[size(subs,1) 1])-1; end
+supportedQs = subv2ind(ns(dn_use), subs); subs=subs(1:prod(ens(dps)),1:length(dps));
+Qarity = prod(ns(dn_use));
+if isempty(dn_use), Qarity = 1; end
+
+fullm.T = zeros(Qarity, 1);
+fullm.T(supportedQs) = fmarginal.T(:);
+rs_dim = CPD.sizes; rs_dim(CPD.cpndx) = 1; %
+if ~isempty(evidence{self}), rs_dim(end)=1; end % reshaping the marginal
+fullm.T = reshape(fullm.T, rs_dim); %
+
+% --------------------------------------------------------------------------------UPDATE--
+
+CPD.nsamples = CPD.nsamples + 1;
+
+% 1) observations vector -> CPD.parents_vals ---------------------------------------------
+cpvals = cat(1, evidence{cps});
+
+if ~isempty(dps_as_cps), % ...get in the dp_as_cp parents...
+ separator = CPD.dps_as_cps.separator;
+ dp_as_cpmap = find_equiv_posns(dps_as_cps, dps);
+ for i=1:dpsize,
+ dp_as_cpvals=zeros(1,sum(ns(dps_as_cps)));
+ possible_vals = ind2subv(ns(dps),i);
+ ll=find(ismember(subs(:,dp_as_cpmap), possible_vals(dp_as_cpmap), 'rows')==1);
+ if ~isempty(ll),
+ where_one = separator + possible_vals(dp_as_cpmap);
+ dp_as_cpvals(where_one)=1;
+ end
+ CPD.parent_vals(CPD.nsamples,:,i) = [dp_as_cpvals(:); cpvals(:)]';
+ end
+else
+ CPD.parent_vals(CPD.nsamples,:) = cpvals(:)';
+end
+
+% 2) weights vector -> CPD.eso_weights ----------------------------------------------------
+if isempty(evidence{self}), % self is hidden
+ pesi=reshape(sum(fullm.T, length(rs_dim)),[dpsize,1]);
+else
+ pesi=reshape(fullm.T,[dpsize,1]);
+end
+assert(approxeq(sum(pesi),1)); % check
+
+% 3) estimate (if R is hidden) or recover (if R is obs) self'value-------------------------
+if isempty(evidence{self}) % P(self|Pa1,...,Pak)=fmarginal/prob(Pa1,...,Pak)
+ r=reshape(mk_stochastic(fullm.T), [dpsize ns(self)]); % matrix size: prod{j,ns(Paj)} x ns(self)
+else
+ r = zeros(dpsize,ns(self));
+ for i=1:dpsize, if pesi(i)~=0, r(i,evidence{self}) = 1; end; end
+end
+for i=1:dpsize, if pesi(i)~=0, assert(approxeq(sum(r(i,:)),1)); end; end % check
+
+% 4) save the previous values --------------------------------------------------------------
+for i=1:dpsize
+ CPD.eso_weights(CPD.nsamples,:,i)=pesi(i);
+ CPD.self_vals(CPD.nsamples,:,i) = r(i,:);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CPD_to_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CPD_to_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function CPT = CPD_to_CPT(CPD)
+% CPD_TO_CPT Convert the discrete CPD to tabular form (tabular)
+% CPT = CPD_to_CPT(CPD)
+
+CPT = CPD.CPT;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+/CPD_to_CPT.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/bayes_update_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/display.m/1.1.1.1/Tue Apr 22 21:00:02 2003//
+/get_field.m/1.1.1.1/Sun Jan 16 02:27:30 2005//
+/learn_params.m/1.1.1.1/Thu Jun 10 01:25:02 2004//
+/log_marg_prob_node.m/1.1.1.1/Fri Jun 11 21:16:00 2004//
+/log_nextcase_prob_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/log_prior.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/maximize_params.m/1.1.1.1/Sun Mar 9 22:44:40 2003//
+/reset_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/set_fields.m/1.1.1.1/Sun Jan 16 02:27:30 2005//
+/tabular_CPD.m/1.1.1.1/Sun Jan 16 02:27:32 2005//
+/update_ess.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/update_ess_simple.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/BIC_score_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/BIC_score_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function score = BIC_score_CPD(CPD, fam, data, ns, cnodes)
+% BIC_score_CPD Compute the BIC score of a tabular CPD
+% score = BIC_score_CPD(CPD, fam, data, ns, cnodes)
+
+if iscell(data)
+ local_data = cell2num(data(fam,:));
+else
+ local_data = data(fam, :);
+end
+counts = compute_counts(local_data, CPD.sizes);
+CPT = mk_stochastic(counts); % MLE
+tiny = exp(-700);
+CPT = CPT + (CPT==0)*tiny; % replace 0s by tiny
+LL = sum(log(CPT(:)) .* counts(:));
+N = size(data, 2);
+score = LL - 0.5*CPD.nparams*log(N);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+/BIC_score_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/bayesian_score_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/log_marg_prob_node_case.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mult_CPD_and_pi_msgs.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/prob_CPT.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/prob_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_node_single_case.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tabular_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/update_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_CPD/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/bayesian_score_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/bayesian_score_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function score = bayesian_score_CPD(CPD, local_ev)
+% bayesian_score_CPD Compute the Bayesian score of a tabular CPD using uniform Dirichlet prior
+% score = bayesian_score_CPD(CPD, local_ev)
+%
+% The Bayesian score is the log marginal likelihood
+
+if iscell(local_ev)
+  data = cell2num(local_ev); % BUG FIX: was num2cell; compute_counts needs a numeric array
+else
+  data = local_ev;
+end
+
+score = dirichlet_score_family(compute_counts(data, CPD.sizes));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/log_marg_prob_node_case.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/log_marg_prob_node_case.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function L = log_marg_prob_node_case(CPD, y, x)
+% LOG_MARG_PROB_NODE_CASE Compute prod_m log P(x(i,m)| x(pi_i,m)) for node i (tabular)
+% L = log_marg_prob_node_case(CPD, self_ev, parent_ev)
+%
+% This is a slightly optimised version of log_marg_prob_node.
+% We assume we have exactly 1 case, i.e., y is a scalar and x is a vector (not a cell array).
+
+sz = CPD.sizes;
+nparents = length(sz)-1;
+
+% We assume the CPTs are already set to the mean of the posterior (due to update_params)
+
+switch nparents
+ case 0, p = CPD.CPT(y);
+ case 1, p = CPD.CPT(x(1), y);
+ case 2, p = CPD.CPT(x(1), x(2), y);
+ case 3, p = CPD.CPT(x(1), x(2), x(3), y);
+ otherwise,
+ ind = subv2ind(sz, [x y]);
+ p = CPD.CPT(ind);
+end
+L = log(p);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/mult_CPD_and_pi_msgs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/mult_CPD_and_pi_msgs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function T = mult_CPD_and_pi_msgs(CPD, n, ps, msgs, except)
+% MULT_CPD_AND_PI_MSGS Multiply the CPD and all the pi messages from parents, perhaps excepting one
+% T = mult_CPD_and_pi_msgs(CPD, n, ps, msgs, except)
+
+if nargin < 5, except = -1; end
+
+dom = [ps n];
+%ns = sparse(1, max(dom));
+ns = zeros(1, max(dom));
+ns(dom) = mysize(CPD.CPT);
+T = dpot(dom, ns(dom), CPD.CPT);
+for i=1:length(ps)
+ p = ps(i);
+ if p ~= except
+ T = multiply_by_pot(T, dpot(p, ns(p), msgs{n}.pi_from_parent{i}.T));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/prob_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/prob_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function p = prob_CPT(CPD, x)
+% PROB_CPT Lookup the prob. of a family value in a tabular CPD
+% p = prob_CPT(CPD, x)
+%
+% This is a version of prob_CPD optimized for tables.
+
+switch length(x)
+ case 1, p = CPD.CPT(x);
+ case 2, p = CPD.CPT(x(1), x(2));
+ case 3, p = CPD.CPT(x(1), x(2), x(3));
+ case 4, p = CPD.CPT(x(1), x(2), x(3), x(4));
+ otherwise,
+ ind = subv2ind(mysize(CPD.CPT), x);
+ p = CPD.CPT(ind);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function p = prob_node(CPD, self_ev, pev)
+% PROB_NODE Compute P(y|pa(y), theta) (tabular)
+% p = prob_node(CPD, self_ev, pev)
+%
+% self_ev{m} is the evidence on this node in case m
+% pev{i,m} is the evidence on the i'th parent in case m
+% If there is a single case, self_ev can be a scalar instead of a cell array
+
+ncases = size(pev, 2);
+
+%assert(~any(isemptycell(pev))); % slow
+%assert(~any(isemptycell(self_ev))); % slow
+
+CPT = CPD_to_CPT(CPD);
+sz = mysize(CPT);
+nparents = length(sz)-1;
+assert(nparents == size(pev, 1));
+
+if ncases==1
+  x = cat(1, pev{:});
+  if iscell(self_ev) % BUG FIX: was iscell(y), but y is not yet defined at this point
+    y = self_ev{1};
+  else
+    y = self_ev;
+  end
+  switch nparents
+   case 0, p = CPT(y);
+   case 1, p = CPT(x(1), y);
+   case 2, p = CPT(x(1), x(2), y);
+   case 3, p = CPT(x(1), x(2), x(3), y);
+   otherwise,
+    ind = subv2ind(CPD.sizes, [x y]);
+    p = CPT(ind);
+  end
+else
+  x = cell2num(pev)'; % BUG FIX: was num2cell; subv2ind needs numeric rows (each row is a case)
+  y = cat(1, self_ev{:})';
+  ind = subv2ind(CPD.sizes, [x y]);
+  p = CPT(ind);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/sample_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/sample_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function y = sample_node(CPD, pev, nsamples)
+% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (tabular)
+% Y = SAMPLE_NODE(CPD, PEV, NSAMPLES)
+%
+% pev(i,m) is the value of the i'th parent in sample m (if there are any parents).
+% y(m) is the m'th sampled value (a row vector).
+% (If pev is a cell array, so is y.)
+% nsamples defaults to 1.
+
+if nargin < 3, nsamples = 1; end
+
+%if nargin < 4, usecell = 0; end
+if iscell(pev), usecell = 1; else usecell = 0; end
+
+if nsamples == 1, pev = pev(:); end
+
+sz = CPD.sizes;
+nparents = length(sz)-1;
+if nparents==0
+ y = sample_discrete(CPD.CPT, 1, nsamples);
+ if usecell
+ y = num2cell(y);
+ end
+ return;
+end
+
+sz = CPD.sizes;
+[nparents nsamples] = size(pev);
+
+if usecell
+ pvals = cell2num(pev)'; % each row is a case
+else
+ pvals = pev';
+end
+
+psz = sz(1:end-1);
+ssz = sz(end);
+ndx = subv2ind(psz, pvals);
+T = reshape(CPD.CPT, [prod(psz) ssz]);
+T2 = T(ndx,:); % each row is a distribution selected by the parents
+C = cumsum(T2, 2); % sum across columns
+R = rand(nsamples, 1);
+y = ones(nsamples, 1);
+for i=1:ssz-1
+ y = y + (R > C(:,i));
+end
+y = y(:)';
+if usecell
+ y = num2cell(y);
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/sample_node_single_case.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/sample_node_single_case.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function y = sample_node(CPD, pev)
+% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (tabular)
+% y = sample_node(CPD, pev)
+%
+% pev{i} is the value of the i'th parent (if any)
+
+%assert(~any(isemptycell(pev)));
+
+%CPT = CPD_to_CPT(CPD);
+%sz = mysize(CPT);
+sz = CPD.sizes;
+nparents = length(sz)-1;
+if nparents > 0
+ pvals = cat(1, pev{:});
+end
+switch nparents
+ case 0, T = CPD.CPT;
+ case 1, T = CPD.CPT(pvals(1), :);
+ case 2, T = CPD.CPT(pvals(1), pvals(2), :);
+ case 3, T = CPD.CPT(pvals(1), pvals(2), pvals(3), :);
+ case 4, T = CPD.CPT(pvals(1), pvals(2), pvals(3), pvals(4), :);
+ otherwise,
+ psz = sz(1:end-1);
+ ssz = sz(end);
+ i = subv2ind(psz, pvals(:)');
+ T = reshape(CPD.CPT, [prod(psz) ssz]);
+ T = T(i,:);
+end
+
+if sz(end)==2
+ r = rand(1,1);
+ if r > T(1)
+ y = 2;
+ else
+ y = 1;
+ end
+else
+ y = sample_discrete(T);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/tabular_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/tabular_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,186 @@
+function CPD = tabular_CPD(bnet, self, varargin)
+% TABULAR_CPD Make a multinomial conditional prob. distrib. (CPT)
+%
+% CPD = tabular_CPD(bnet, node) creates a random CPT.
+%
+% The following arguments can be specified [default in brackets]
+%
+% CPT - specifies the params ['rnd']
+% - T means use table T; it will be reshaped to the size of node's family.
+% - 'rnd' creates rnd params (drawn from uniform)
+% - 'unif' creates a uniform distribution
+% - 'leftright' only transitions from i to i/i+1 are allowed, for each non-self parent context.
+% The non-self parents are all parents except oldself.
+% selfprob - The prob of transition from i to i if CPT = 'leftright' [0.1]
+% old_self - id of the node corresponding to self in the previous slice [self-ss]
+% adjustable - 0 means don't adjust the parameters during learning [1]
+% prior_type - defines type of prior ['none']
+% - 'none' means do ML estimation
+% - 'dirichlet' means add pseudo-counts to every cell
+% - 'entropic' means use a prior P(theta) propto exp(-H(theta)) (see Brand)
+% dirichlet_weight - equivalent sample size (ess) of the dirichlet prior [1]
+% dirichlet_type - defines the type of Dirichlet prior ['BDeu']
+% - 'unif' means put dirichlet_weight in every cell
+% - 'BDeu' means we put 'dirichlet_weight/(r q)' in every cell
+% where r = self_sz and q = prod(parent_sz) (see Heckerman)
+% trim - 1 means trim redundant params (rows in CPT) when using entropic prior [0]
+%
+% e.g., tabular_CPD(bnet, i, 'CPT', T)
+% e.g., tabular_CPD(bnet, i, 'CPT', 'unif', 'dirichlet_weight', 2, 'dirichlet_type', 'unif')
+%
+% REFERENCES
+% M. Brand - "Structure learning in conditional probability models via an entropic prior
+% and parameter extinction", Neural Computation 11 (1999): 1155--1182
+% M. Brand - "Pattern discovery via entropy minimization" [covers annealing]
+% AI & Statistics 1999. Equation numbers refer to this paper, which is available from
+% www.merl.com/reports/docs/TR98-21.pdf
+% D. Heckerman, D. Geiger and M. Chickering,
+% "Learning Bayesian networks: the combination of knowledge and statistical data",
+% Microsoft Research Tech Report, 1994
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'tabular_CPD', discrete_CPD(0, []));
+ return;
+elseif isa(bnet, 'tabular_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+fam_sz = ns([ps self]);
+CPD.sizes = fam_sz;
+CPD.leftright = 0;
+
+% set defaults
+CPD.CPT = mk_stochastic(myrand(fam_sz));
+CPD.adjustable = 1;
+CPD.prior_type = 'none';
+dirichlet_type = 'BDeu';
+dirichlet_weight = 1;
+CPD.trim = 0;
+selfprob = 0.1;
+
+% extract optional args
+args = varargin;
+% check for old syntax CPD(bnet, i, CPT) as opposed to CPD(bnet, i, 'CPT', CPT)
+if ~isempty(args) & ~isstr(args{1})
+ CPD.CPT = myreshape(args{1}, fam_sz);
+ args = [];
+end
+
+% if old_self is specified, read in the value before CPT is created
+old_self = [];
+for i=1:2:length(args)
+ switch args{i},
+ case 'old_self', old_self = args{i+1};
+ end
+end
+
+for i=1:2:length(args)
+ switch args{i},
+ case 'CPT',
+ T = args{i+1};
+ if ischar(T)
+ switch T
+ case 'unif', CPD.CPT = mk_stochastic(myones(fam_sz));
+ case 'rnd', CPD.CPT = mk_stochastic(myrand(fam_sz));
+ case 'leftright',
+ % we just initialise the CPT to leftright - this structure will
+ % be maintained by EM, assuming we don't use a prior...
+ CPD.leftright = 1;
+ if isempty(old_self) % we assume the network is a DBN
+ ss = bnet.nnodes_per_slice;
+ old_self = self-ss;
+ end
+ other_ps = mysetdiff(ps, old_self);
+ Qps = prod(ns(other_ps));
+ Q = ns(self);
+ p = selfprob;
+ LR = mk_leftright_transmat(Q, p);
+ transprob = repmat(reshape(LR, [1 Q Q]), [Qps 1 1]); % transprob(k,i,j)
+ transprob = permute(transprob, [2 1 3]); % now transprob(i,k,j)
+ CPD.CPT = myreshape(transprob, fam_sz);
+ otherwise, error(['invalid CPT ' T]);
+ end
+ else
+ CPD.CPT = myreshape(T, fam_sz);
+ end
+
+ case 'prior_type', CPD.prior_type = args{i+1};
+ case 'dirichlet_type', dirichlet_type = args{i+1};
+ case 'dirichlet_weight', dirichlet_weight = args{i+1};
+ case 'adjustable', CPD.adjustable = args{i+1};
+ case 'clamped', CPD.adjustable = ~args{i+1};
+ case 'trim', CPD.trim = args{i+1};
+ case 'old_self', noop = 1; % already read in
+ otherwise, error(['invalid argument name: ' args{i}]);
+ end
+end
+
+switch CPD.prior_type
+ case 'dirichlet',
+ switch dirichlet_type
+ case 'unif', CPD.dirichlet = dirichlet_weight * myones(fam_sz);
+ case 'BDeu', CPD.dirichlet = dirichlet_weight * mk_stochastic(myones(fam_sz));
+ otherwise, error(['invalid dirichlet_type ' dirichlet_type])
+ end
+ case {'entropic', 'none'}
+ CPD.dirichlet = [];
+ otherwise, error(['invalid prior_type ' prior_type])
+end
+
+
+
+% fields to do with learning
+if ~CPD.adjustable
+ CPD.counts = [];
+ CPD.nparams = 0;
+ CPD.nsamples = [];
+else
+ CPD.counts = zeros(size(CPD.CPT));
+ psz = fam_sz(1:end-1);
+ ss = fam_sz(end);
+ if CPD.leftright
+ % For each of the Qps contexts, we specify Q elements on the diagoanl
+ CPD.nparams = Qps * Q;
+ else
+ % sum-to-1 constraint reduces the effective arity of the node by 1
+ CPD.nparams = prod([psz ss-1]);
+ end
+ CPD.nsamples = 0;
+end
+
+fam_sz = CPD.sizes;
+psz = prod(fam_sz(1:end-1));
+ssz = fam_sz(end);
+CPD.trimmed_trans = zeros(psz, ssz); % must declare before reading
+
+CPD = class(CPD, 'tabular_CPD', discrete_CPD(~CPD.adjustable, fam_sz));
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.CPT = [];
+CPD.sizes = [];
+CPD.prior_type = [];
+CPD.dirichlet = [];
+CPD.adjustable = [];
+CPD.counts = [];
+CPD.nparams = [];
+CPD.nsamples = [];
+CPD.trim = [];
+CPD.trimmed_trans = [];
+CPD.leftright = [];
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/update_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/Old/update_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function CPD = update_params(CPD, ev, counts)
+% UPDATE_PARAMS Update the Dirichlet pseudo counts and compute the new MAP param estimates (tabular)
+%
+% CPD = update_params(CPD, ev) uses the evidence on the family from a single case.
+%
+% CPD = update_params(CPD, [], counts) does a batch update using the specified suff. stats.
+
+if nargin < 3
+ n = length(ev);
+ data = cat(1, ev{:}); % convert to a vector of scalars
+ counts = compute_counts(data(:)', 1:n, mysize(CPD.CPT));
+end
+
+CPD.prior = CPD.prior + counts;
+CPD.CPT = mk_stochastic(CPD.prior);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/bayes_update_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/bayes_update_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function CPD = bayes_update_params(CPD, self_ev, pev)
+% UPDATE_PARAMS_COMPLETE Bayesian parameter updating given completely observed data (tabular)
+% CPD = update_params_complete(CPD, self_ev, pev)
+%
+% self_ev(m) is the evidence on this node in case m.
+% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents).
+% These can be arrays or cell arrays.
+%
+% We update the Dirichlet pseudo counts and set the CPT to the mean of the posterior.
+
+if iscell(self_ev), usecell = 1; else usecell = 0; end
+
+ncases = length(self_ev);
+sz = CPD.sizes;
+nparents = length(sz)-1;
+assert(nparents == size(pev,1));
+
+if ncases == 0 | ~adjustable_CPD(CPD)
+ return;
+elseif ncases == 1 % speedup the sequential learning case by avoiding normalization of the whole array
+ if usecell
+ x = cat(1, pev{:})';
+ y = self_ev{1};
+ else
+ x = pev(:)';
+ y = self_ev;
+ end
+ switch nparents
+ case 0,
+ CPD.dirichlet(y) = CPD.dirichlet(y)+1;
+ CPD.CPT = CPD.dirichlet / sum(CPD.dirichlet);
+ case 1,
+ CPD.dirichlet(x(1), y) = CPD.dirichlet(x(1), y)+1;
+ CPD.CPT(x(1), :) = CPD.dirichlet(x(1), :) ./ sum(CPD.dirichlet(x(1), :));
+ case 2,
+ CPD.dirichlet(x(1), x(2), y) = CPD.dirichlet(x(1), x(2), y)+1;
+ CPD.CPT(x(1), x(2), :) = CPD.dirichlet(x(1), x(2), :) ./ sum(CPD.dirichlet(x(1), x(2), :));
+ case 3,
+ CPD.dirichlet(x(1), x(2), x(3), y) = CPD.dirichlet(x(1), x(2), x(3), y)+1;
+ CPD.CPT(x(1), x(2), x(3), :) = CPD.dirichlet(x(1), x(2), x(3), :) ./ sum(CPD.dirichlet(x(1), x(2), x(3), :));
+ otherwise,
+ ind = subv2ind(sz, [x y]);
+ CPD.dirichlet(ind) = CPD.dirichlet(ind) + 1;
+ CPD.CPT = mk_stochastic(CPD.dirichlet);
+ end
+else
+ if usecell
+ data = [cell2num(pev); cell2num(self_ev)];
+ else
+ data = [pev; self_ev];
+ end
+ counts = compute_counts(data, sz);
+ CPD.dirichlet = CPD.dirichlet + counts;
+ CPD.CPT = mk_stochastic(CPD.dirichlet);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function display(CPD)
+
+disp('tabular_CPD object');
+disp(struct(CPD));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function val = get_field(CPD, name)
+% GET_PARAMS Get the parameters (fields) for a tabular_CPD object
+% val = get_params(CPD, name)
+%
+% The following fields can be accessed
+%
+% cpt, counts
+%
+% e.g., CPT = get_params(CPD, 'cpt')
+
+switch name
+ case 'cpt', val = CPD.CPT;
+ case 'counts', val = CPD.counts;
+ otherwise,
+ error(['invalid argument name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/learn_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/learn_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function CPD = learn_params(CPD, fam, data, ns, cnodes)
+%function CPD = learn_params(CPD, local_data)
+% LEARN_PARAMS Compute the ML/MAP estimate of the params of a tabular CPD given complete data
+% CPD = learn_params(CPD, local_data)
+%
+% local_data(i,m) is the value of i'th family member in case m (can be cell array).
+
+local_data = data(fam, :);
+if iscell(local_data)
+ local_data = cell2num(local_data);
+end
+counts = compute_counts(local_data, CPD.sizes);
+switch CPD.prior_type
+ case 'none', CPD.CPT = mk_stochastic(counts);
+ case 'dirichlet', CPD.CPT = mk_stochastic(counts + CPD.dirichlet);
+ otherwise, error(['unrecognized prior ' CPD.prior_type])
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/log_marg_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/log_marg_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,69 @@
+function L = log_marg_prob_node(CPD, self_ev, pev, usecell)
+% LOG_MARG_PROB_NODE Compute sum_m log P(x(i,m)| x(pi_i,m)) for node i (tabular)
+% L = log_marg_prob_node(CPD, self_ev, pev)
+%
+% This differs from log_prob_node because we integrate out the parameters.
+% self_ev(m) is the evidence on this node in case m.
+% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents).
+% (These may also be cell arrays.)
+
+ncases = length(self_ev);
+sz = CPD.sizes;
+nparents = length(sz)-1;
+assert(ncases == size(pev, 2));
+
+if nargin < 4
+ %usecell = 0;
+ if iscell(self_ev)
+ usecell = 1;
+ else
+ usecell = 0;
+ end
+end
+
+
+if ncases==0
+ L = 0;
+ return;
+elseif ncases==1 % speedup the sequential learning case
+ CPT = CPD.CPT;
+ % We assume the CPTs are already set to the mean of the posterior (due to bayes_update_params)
+ if usecell
+ x = cat(1, pev{:})';
+ y = self_ev{1};
+ else
+ %x = pev(:)';
+ x = pev;
+ y = self_ev;
+ end
+ switch nparents
+ case 0, p = CPT(y);
+ case 1, p = CPT(x(1), y);
+ case 2, p = CPT(x(1), x(2), y);
+ case 3, p = CPT(x(1), x(2), x(3), y);
+ otherwise,
+ ind = subv2ind(sz, [x y]);
+ p = CPT(ind);
+ end
+ L = log(p);
+else
+ % We ignore the CPTs here and assume the prior has not been changed
+
+ % We arrange the data as in the following example.
+ % Let there be 2 parents and 3 cases. Let p(i,m) be parent i in case m,
+ % and y(m) be the child in case m. Then we create the data matrix
+ %
+ % p(1,1) p(1,2) p(1,3)
+ % p(2,1) p(2,2) p(2,3)
+ % y(1) y(2) y(3)
+ if usecell
+ data = [cell2num(pev); cell2num(self_ev)];
+ else
+ data = [pev; self_ev];
+ end
+ %S = struct(CPD); fprintf('log marg prob node %d, ps\n', S.self); disp(S.parents)
+ counts = compute_counts(data, sz);
+ L = dirichlet_score_family(counts, CPD.dirichlet);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/log_nextcase_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/log_nextcase_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,72 @@
+function L = log_nextcase_prob_node(CPD, self_ev, pev, test_self_ev, test_pev)
+% LOG_NEXTCASE_PROB_NODE compute the joint distribution of a node (tabular) of a new case given
+% completely observed data.
+%
+% The input arguments are mainly similar with log_marg_prob_node(CPD, self_ev, pev, usecell),
+% but add test_self_ev, test_pev, and without usecell
+% test_self_ev(m) is the evidence on this node in a test case.
+% test_pev(i) is the evidence on the i'th parent in the test case (if there are any parents).
+%
+% Written by qian.diao@intel.com
+
+ncases = length(self_ev);
+sz = CPD.sizes;
+nparents = length(sz)-1;
+assert(ncases == size(pev, 2));
+
+if nargin < 6
+ %usecell = 0;
+ if iscell(self_ev)
+ usecell = 1;
+ else
+ usecell = 0;
+ end
+end
+
+
+if ncases==0
+ L = 0;
+ return;
+elseif ncases==1 % speedup the sequential learning case; here need correction!!!
+ CPT = CPD.CPT;
+ % We assume the CPTs are already set to the mean of the posterior (due to bayes_update_params)
+ if usecell
+ x = cat(1, pev{:})';
+ y = self_ev{1};
+ else
+ %x = pev(:)';
+ x = pev;
+ y = self_ev;
+ end
+ switch nparents
+ case 0, p = CPT(y);
+ case 1, p = CPT(x(1), y);
+ case 2, p = CPT(x(1), x(2), y);
+ case 3, p = CPT(x(1), x(2), x(3), y);
+ otherwise,
+ ind = subv2ind(sz, [x y]);
+ p = CPT(ind);
+ end
+ L = log(p);
+else
+ % We ignore the CPTs here and assume the prior has not been changed
+
+ % We arrange the data as in the following example.
+ % Let there be 2 parents and 3 cases. Let p(i,m) be parent i in case m,
+ % and y(m) be the child in case m. Then we create the data matrix
+ %
+ % p(1,1) p(1,2) p(1,3)
+ % p(2,1) p(2,2) p(2,3)
+ % y(1) y(2) y(3)
+ if usecell
+ data = [cell2num(pev); cell2num(self_ev)];
+ else
+ data = [pev; self_ev];
+ end
+ counts = compute_counts(data, sz);
+
+ % compute the (N_ijk'+ N_ijk)/(N_ij' + N_ij) under the condition of 1_m+1,ijk = 1
+ L = predict_family(counts, CPD.prior, test_self_ev, test_pev);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/log_prior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/log_prior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function L = log_prior(CPD)
+% LOG_PRIOR Return log P(theta) for a tabular CPD
+% L = log_prior(CPD)
+
+switch CPD.prior_type
+ case 'none',
+ L = 0;
+ case 'dirichlet',
+ D = CPD.dirichlet(:);
+ L = sum(log(D + (D==0)));
+ case 'entropic',
+ % log-prior = log exp(-H(theta)) = sum_i theta_i log (theta_i)
+ fam_sz = CPD.sizes;
+ psz = prod(fam_sz(1:end-1));
+ ssz = fam_sz(end);
+ C = reshape(CPD.CPT, psz, ssz);
+ L = sum(sum(C .* log(C + (C==0))));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/maximize_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/maximize_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function CPD = maximize_params(CPD, temp)
+% MAXIMIZE_PARAMS Set the params of a tabular node to their ML/MAP values.
+% CPD = maximize_params(CPD, temp)
+
+if ~adjustable_CPD(CPD), return; end
+
+%assert(approxeq(sum(CPD.counts(:)), CPD.nsamples)); % false!
+switch CPD.prior_type
+ case 'none',
+ counts = reshape(CPD.counts, size(CPD.CPT));
+ CPD.CPT = mk_stochastic(counts);
+ case 'dirichlet',
+ counts = reshape(CPD.counts, size(CPD.CPT));
+ CPD.CPT = mk_stochastic(counts + CPD.dirichlet);
+
+ % case 'entropic',
+% % For an HMM,
+% % CPT(i,j) = pr(X(t)=j | X(t-1)=i) = transprob(i,j)
+% % counts(i,j) = E #(X(t-1)=i, X(t)=j) = exp_num_trans(i,j)
+% Z = 1-temp;
+% fam_sz = CPD.sizes;
+% psz = prod(fam_sz(1:end-1));
+% ssz = fam_sz(end);
+% counts = reshape(CPD.counts, psz, ssz);
+% CPT = zeros(psz, ssz);
+% for i=CPD.entropic_pcases(:)'
+% [CPT(i,:), logpost] = entropic_map_estimate(counts(i,:), Z);
+% end
+% non_entropic_pcases = mysetdiff(1:psz, CPD.entropic_pcases);
+% for i=non_entropic_pcases(:)'
+% CPT(i,:) = mk_stochastic(counts(i,:));
+% end
+% %for i=1:psz
+% % [CPT(i,:), logpost] = entropic_map(counts(i,:), Z);
+% %end
+% if CPD.trim & (temp < 2) % at high temps, we would trim everything!
+% % grad(j) = d log lik / d theta(i ->j)
+% % CPT(i,j) = 0 => counts(i,j) = 0
+% % so we can safely replace 0s by 1s in the denominator
+% denom = CPT(i,:) + (CPT(i,:)==0);
+% grad = counts(i,:) ./ denom;
+% trim = find(CPT(i,:) <= exp(-(1/Z)*grad)); % eqn 32
+% if ~isempty(trim)
+% CPT(i,trim) = 0;
+% if all(CPD.trimmed_trans(i,trim)==0) % trimming for 1st time
+% disp(['trimming CPT(' num2str(i) ',' num2str(trim) ')'])
+% end
+% CPD.trimmed_trans(i,trim) = 1;
+% end
+% end
+% CPD.CPT = myreshape(CPT, CPD.sizes);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/reset_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/reset_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function CPD = reset_ess(CPD)
+% RESET_ESS Reset the Expected Sufficient Statistics of a tabular node.
+% CPD = reset_ess(CPD)
+
+%CPD.counts = zeros(size(CPD.CPT));
+CPD.counts = zeros(prod(size(CPD.CPT)), 1);
+CPD.nsamples = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function CPD = set_fields(CPD, varargin)
+% SET_PARAMS Set the parameters (fields) for a tabular_CPD object
+% CPD = set_params(CPD, name/value pairs)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+%
+% CPT, prior, clamped, counts
+%
+% e.g., CPD = set_params(CPD, 'CPT', 'rnd')
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'CPT',
+ if ischar(args{i+1})
+ switch args{i+1}
+ case 'unif', CPD.CPT = mk_stochastic(myones(CPD.sizes));
+ case 'rnd', CPD.CPT = mk_stochastic(myrand(CPD.sizes));
+ otherwise, error(['invalid type ' args{i+1}]);
+ end
+ elseif isscalarBNT(args{i+1})
+ p = args{i+1};
+ k = CPD.sizes(end);
+ % Bug fix by Hervé Boutrouille 10/1/01
+ CPD.CPT = myreshape(sample_dirichlet(p*ones(1,k), prod(CPD.sizes(1:end-1)), CPD.sizes));
+ %CPD.CPT = myreshape(sample_dirichlet(p*ones(1,k), prod(CPD.sizes(1:end-1))), CPD.sizes);
+ else
+ CPD.CPT = myreshape(args{i+1}, CPD.sizes);
+ end
+
+ case 'prior',
+ if ischar(args{i+1}) & strcmp(args{i+1}, 'unif')
+ CPD.prior = myones(CPD.sizes);
+ elseif isscalarBNT(args{i+1})
+ CPD.prior = args{i+1} * normalise(myones(CPD.sizes));
+ else
+ CPD.prior = myreshape(args{i+1}, CPD.sizes);
+ end
+
+ %case 'clamped', CPD.clamped = strcmp(args{i+1}, 'yes');
+ %case 'clamped', CPD = set_clamped(CPD, strcmp(args{i+1}, 'yes'));
+ case 'clamped', CPD = set_clamped(CPD, args{i+1});
+
+ case 'counts', CPD.counts = args{i+1};
+
+ otherwise,
+ %error(['invalid argument name ' args{i}]);
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/tabular_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/tabular_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,173 @@
+function CPD = tabular_CPD(bnet, self, varargin)
+% TABULAR_CPD Make a multinomial conditional prob. distrib. (CPT)
+%
+% CPD = tabular_CPD(bnet, node) creates a random CPT.
+%
+% The following arguments can be specified [default in brackets]
+%
+% CPT - specifies the params ['rnd']
+% - T means use table T; it will be reshaped to the size of node's family.
+% - 'rnd' creates rnd params (drawn from uniform)
+% - 'unif' creates a uniform distribution
+% adjustable - 0 means don't adjust the parameters during learning [1]
+% prior_type - defines type of prior ['none']
+% - 'none' means do ML estimation
+% - 'dirichlet' means add pseudo-counts to every cell
+% - 'entropic' means use a prior P(theta) propto exp(-H(theta)) (see Brand)
+% dirichlet_weight - equivalent sample size (ess) of the dirichlet prior [1]
+% dirichlet_type - defines the type of Dirichlet prior ['BDeu']
+% - 'unif' means put dirichlet_weight in every cell
+% - 'BDeu' means we put 'dirichlet_weight/(r q)' in every cell
+% where r = self_sz and q = prod(parent_sz) (see Heckerman)
+% trim - 1 means trim redundant params (rows in CPT) when using entropic prior [0]
+% entropic_pcases - list of assignments to the parents nodes when we should use
+% the entropic prior; all other cases will be estimated using ML [1:psz]
+% sparse - 1 means use 1D sparse array to represent CPT [0]
+%
+% e.g., tabular_CPD(bnet, i, 'CPT', T)
+% e.g., tabular_CPD(bnet, i, 'CPT', 'unif', 'dirichlet_weight', 2, 'dirichlet_type', 'unif')
+%
+% REFERENCES
+% M. Brand - "Structure learning in conditional probability models via an entropic prior
+% and parameter extinction", Neural Computation 11 (1999): 1155--1182
+% M. Brand - "Pattern discovery via entropy minimization" [covers annealing]
+% AI & Statistics 1999. Equation numbers refer to this paper, which is available from
+% www.merl.com/reports/docs/TR98-21.pdf
+% D. Heckerman, D. Geiger and M. Chickering,
+% "Learning Bayesian networks: the combination of knowledge and statistical data",
+% Microsoft Research Tech Report, 1994
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'tabular_CPD', discrete_CPD(0, []));
+ return;
+elseif isa(bnet, 'tabular_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+fam_sz = ns([ps self]); % sizes of [parents, self]
+psz = prod(ns(ps)); % number of joint parent configurations
+CPD.sizes = fam_sz;
+CPD.leftright = 0;
+CPD.sparse = 0;
+
+% set defaults
+CPD.CPT = mk_stochastic(myrand(fam_sz));
+CPD.adjustable = 1;
+CPD.prior_type = 'none';
+dirichlet_type = 'BDeu';
+dirichlet_weight = 1;
+CPD.trim = 0;
+selfprob = 0.1; % NOTE(review): unused in this function
+CPD.entropic_pcases = 1:psz;
+
+% extract optional args
+args = varargin;
+% check for old syntax CPD(bnet, i, CPT) as opposed to CPD(bnet, i, 'CPT', CPT)
+if ~isempty(args) & ~isstr(args{1})
+ CPD.CPT = myreshape(args{1}, fam_sz);
+ args = [];
+end
+
+for i=1:2:length(args)
+ switch args{i},
+ case 'CPT',
+ T = args{i+1};
+ if ischar(T)
+ switch T
+ case 'unif', CPD.CPT = mk_stochastic(myones(fam_sz));
+ case 'rnd', CPD.CPT = mk_stochastic(myrand(fam_sz));
+ otherwise, error(['invalid CPT ' T]);
+ end
+ else
+ CPD.CPT = myreshape(T, fam_sz);
+ end
+ case 'prior_type', CPD.prior_type = args{i+1};
+ case 'dirichlet_type', dirichlet_type = args{i+1};
+ case 'dirichlet_weight', dirichlet_weight = args{i+1};
+ case 'adjustable', CPD.adjustable = args{i+1};
+ case 'clamped', CPD.adjustable = ~args{i+1};
+ case 'trim', CPD.trim = args{i+1};
+ case 'entropic_pcases', CPD.entropic_pcases = args{i+1};
+ case 'sparse', CPD.sparse = args{i+1};
+ otherwise, error(['invalid argument name: ' args{i}]);
+ end
+end
+
+switch CPD.prior_type
+ case 'dirichlet',
+ switch dirichlet_type
+ case 'unif', CPD.dirichlet = dirichlet_weight * myones(fam_sz);
+ case 'BDeu', CPD.dirichlet = (dirichlet_weight/psz) * mk_stochastic(myones(fam_sz));
+ otherwise, error(['invalid dirichlet_type ' dirichlet_type])
+ end
+ case {'entropic', 'none'}
+ CPD.dirichlet = [];
+ otherwise, error(['invalid prior_type ' CPD.prior_type]) % bug fix: bare 'prior_type' was undefined here
+end
+
+
+
+% fields to do with learning
+if ~CPD.adjustable
+ CPD.counts = [];
+ CPD.nparams = 0;
+ CPD.nsamples = [];
+else
+ %CPD.counts = zeros(size(CPD.CPT));
+ CPD.counts = zeros(prod(size(CPD.CPT)), 1);
+ psz = fam_sz(1:end-1);
+ ss = fam_sz(end);
+ if CPD.leftright
+ % For each of the Qps contexts, we specify Q elements on the diagonal
+ CPD.nparams = Qps * Q; % NOTE(review): Qps and Q are undefined; branch is unreachable since CPD.leftright is always 0 above
+ else
+ % sum-to-1 constraint reduces the effective arity of the node by 1
+ CPD.nparams = prod([psz ss-1]);
+ end
+ CPD.nsamples = 0;
+end
+
+CPD.trimmed_trans = [];
+fam_sz = CPD.sizes;
+
+%psz = prod(fam_sz(1:end-1));
+%ssz = fam_sz(end);
+%CPD.trimmed_trans = zeros(psz, ssz); % must declare before reading
+
+%sparse CPT
+if CPD.sparse
+ CPD.CPT = sparse(CPD.CPT(:));
+end
+
+CPD = class(CPD, 'tabular_CPD', discrete_CPD(~CPD.adjustable, fam_sz));
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.CPT = []; % the conditional probability table
+CPD.sizes = []; % family sizes [parent sizes, self size]
+CPD.prior_type = []; % 'none', 'dirichlet' or 'entropic'
+CPD.dirichlet = []; % pseudo-count table (empty unless prior_type is 'dirichlet')
+CPD.adjustable = []; % 0 means the parameters are clamped during learning
+CPD.counts = []; % expected sufficient statistics, stored as a flat column vector
+CPD.nparams = []; % number of free parameters
+CPD.nsamples = []; % number of (expected) data cases absorbed so far
+CPD.trim = [];
+CPD.trimmed_trans = [];
+CPD.leftright = [];
+CPD.entropic_pcases = [];
+CPD.sparse = []; % 1 means the CPT is stored as a 1D sparse array
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/update_ess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/update_ess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+% UPDATE_ESS Update the Expected Sufficient Statistics of a tabular node.
+% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv)
+
+dom = fmarginal.domain;
+
+if all(hidden_bitv(dom)) % every family member hidden: the marginal table is already full-sized
+ CPD = update_ess_simple(CPD, fmarginal.T);
+ %fullm = add_ev_to_dmarginal(fmarginal, evidence, ns);
+ %assert(approxeq(fullm.T(:), fmarginal.T(:)))
+else % some family members observed: expand the marginal over the observed values first
+ fullm = add_ev_to_dmarginal(fmarginal, evidence, ns);
+ CPD = update_ess_simple(CPD, fullm.T);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/update_ess_simple.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_CPD/update_ess_simple.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function CPD = update_ess_simple(CPD, counts)
+% UPDATE_ESS_SIMPLE Update the Expected Sufficient Statistics of a tabular node.
+% function CPD = update_ess_simple(CPD, counts)
+
+CPD.nsamples = CPD.nsamples + 1; % one more (expected) data case absorbed
+CPD.counts = CPD.counts + counts(:); % accumulate the counts as a flat column vector
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CPD_to_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CPD_to_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function CPT = CPD_to_CPT(CPD)
+% CPD_TO_CPT Convert the tabular_decision_node to a CPT
+% CPT = CPD_to_CPT(CPD)
+
+CPT = CPD.CPT; % the policy is already stored as a table
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/CPD_to_CPT.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/get_field.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tabular_decision_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_decision_node
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/tabular_decision_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_decision_node/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/tabular_decision_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/Old/tabular_decision_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function CPD = tabular_decision_node(sz, CPT)
+% TABULAR_DECISION_NODE Represent the randomized policy over a discrete decision/action node as a table
+% CPD = tabular_decision_node(sz, CPT)
+%
+% sz(1:end-1) is the sizes of the parents, sz(end) is the size of this node
+% By default, CPT is set to the uniform random policy
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'tabular_decision_node');
+ return;
+elseif isa(sz, 'tabular_decision_node')
+ % This might occur if we are copying an object.
+ CPD = sz;
+ return;
+end
+CPD = init_fields;
+
+if nargin < 2
+ CPT = mk_stochastic(myones(sz)); % default: uniform policy
+else
+ CPT = myreshape(CPT, sz); % coerce the user-supplied table to the family shape
+end
+
+CPD.CPT = CPT;
+CPD.size = sz;
+
+CPD = class(CPD, 'tabular_decision_node');
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.CPT = []; % the policy table
+CPD.size = []; % sizes of [parents, self]
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(CPD)
+% DISPLAY Print a summary of the object (invoked when the name is typed with no semicolon)
+disp('tabular decision node object');
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function vals = get_field(CPD, name)
+% GET_FIELD Get a parameter (field) of a tabular_decision_node object
+% vals = get_field(CPD, name)
+%
+% The following fields can be accessed
+%
+% policy - the table containing the policy
+%
+% e.g., policy = get_field(CPD, 'policy')
+
+% Bug fix: the previous implementation iterated over 'varargin', which is
+% not declared by this fixed-arity signature, so any call failed with an
+% undefined-variable error. Dispatch directly on the requested name instead.
+
+switch name
+ case 'policy', vals = CPD.CPT;
+ otherwise,
+ error(['invalid argument name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function CPD = set_fields(CPD, varargin) % renamed from set_params to match the file name (set_fields.m)
+% SET_FIELDS Set the parameters (fields) for a tabular_decision_node object
+% CPD = set_fields(CPD, name/value pairs)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+%
+% policy - the table containing the policy
+%
+% e.g., CPD = set_fields(CPD, 'policy', T)
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'policy', CPD.CPT = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/tabular_decision_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_decision_node/tabular_decision_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function CPD = tabular_decision_node(bnet, self, CPT)
+% TABULAR_DECISION_NODE Represent a stochastic policy over a discrete decision/action node as a table
+% CPD = tabular_decision_node(bnet, self, CPT)
+%
+% node is the number of a node in this equivalence class.
+% CPT is an optional argument (see tabular_CPD for details); by default, it is the uniform policy.
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'tabular_decision_node', discrete_CPD(1, []));
+ return;
+elseif isa(bnet, 'tabular_decision_node')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+ns = bnet.node_sizes;
+fam = family(bnet.dag, self);
+ps = parents(bnet.dag, self);
+sz = ns(fam); % sizes of [parents, self]
+
+if nargin < 3
+ CPT = mk_stochastic(myones(sz)); % default: uniform policy
+else
+ CPT = myreshape(CPT, sz); % coerce the user-supplied table to the family shape
+end
+
+CPD.CPT = CPT;
+CPD.sizes = sz;
+
+clamped = 1; % don't update using EM
+CPD = class(CPD, 'tabular_decision_node', discrete_CPD(clamped, ns([ps self])));
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.CPT = []; % the policy table
+CPD.sizes = []; % sizes of [parents, self]
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/convert_to_table.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/get_field.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tabular_kernel.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_kernel
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/tabular_kernel.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_kernel/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/tabular_kernel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/Old/tabular_kernel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function K = tabular_kernel(fg, self)
+% TABULAR_KERNEL Make a table-based local kernel (discrete potential)
+% K = tabular_kernel(fg, self)
+%
+% fg is a factor graph
+% self is the number of a representative domain
+%
+% Use 'set_params_kernel' to adjust the following fields
+% table - a q[1]xq[2]x... array, where q[i] is the number of values for i'th node
+% in this domain [default: random values from [0,1], which need not sum to 1]
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ K = init_fields;
+ K = class(K, 'tabular_kernel');
+ return;
+elseif isa(fg, 'tabular_kernel')
+ % This might occur if we are copying an object.
+ K = fg;
+ return;
+end
+K = init_fields;
+
+ns = fg.node_sizes;
+dom = fg.doms{self};
+% we don't store the actual domain since it may vary due to parameter tying
+K.sz = ns(dom);
+K.table = myrand(K.sz); % unnormalised random entries in [0,1]
+
+K = class(K, 'tabular_kernel');
+
+
+%%%%%%%
+
+
+function K = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+K.table = []; % the potential table
+K.sz = []; % sizes of the nodes in the representative domain
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a tabular CPD to one or more potentials
+% pot = convert_to_pot(CPD, pot_type, domain, evidence)
+
+% This is the same as discrete_CPD/convert_to_pot,
+% except we didn't want the kernel to inherit methods like sample_node etc.
+
+sz = CPD.sz;
+ns = zeros(1, max(domain));
+ns(domain) = sz;
+
+odom = domain(~isemptycell(evidence(domain))); % the observed nodes in this domain
+T = convert_to_table(CPD, domain, evidence);
+
+switch pot_type
+ case 'u',
+ pot = upot(domain, sz, T, 0*myones(sz));
+ case 'd',
+ ns(odom) = 1; % observed nodes collapse to size 1
+ pot = dpot(domain, ns(domain), T);
+ case 'c',
+ % Since we want the output to be a Gaussian, the whole family must be observed.
+ % In other words, the potential is really just a constant.
+ p = T.p;
+ %p = prob_node(CPD, evidence(domain(end)), evidence(domain(1:end-1)));
+ ns(domain) = 0;
+ pot = cpot(domain, ns(domain), log(p));
+ case 'cg',
+ T = T(:);
+ ns(odom) = 1;
+ can = cell(1, length(T)); % one canonical (constant) potential per discrete configuration
+ for i=1:length(T)
+ can{i} = cpot([], [], log(T(i)));
+ end
+ pot = cgpot(domain, [], ns, can);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/convert_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/convert_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function T = convert_to_table(CPD, domain, evidence)
+% CONVERT_TO_TABLE Convert a discrete CPD to a table
+% T = convert_to_table(CPD, domain, evidence)
+%
+% We convert the CPD to a CPT, and then lookup the evidence on the discrete parents.
+% The resulting table can easily be converted to a potential.
+
+CPT = CPD.table;
+odom = domain(~isemptycell(evidence(domain))); % the observed nodes in this domain
+vals = cat(1, evidence{odom}); % their observed values
+map = find_equiv_posns(odom, domain); % positions of the observed nodes within 'domain'
+index = mk_multi_index(length(domain), map, vals);
+T = CPT(index{:}); % slice the table at the observed values
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function val = get_params_kernel(K, name)
+% GET_PARAMS_KERNEL Accessor function for a field (tabular_kernel)
+% val = get_params_kernel(K, name)
+%
+% e.g., get_params_kernel(K, 'table')
+% NOTE(review): file is get_field.m; MATLAB dispatches on the file name, not this internal name.
+switch name
+ case 'table', val = K.table;
+ otherwise,
+ error(['invalid field name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function K = set_params_kernel(K, name, val)
+% SET_PARAMS_KERNEL Accessor function for a field (table_kernel)
+% K = set_params_kernel(K, name, val)
+%
+% e.g., K = set_params_kernel(K, 'table', rand(2,3,2)) for a kernel on 3 nodes with 2,3,2 values each
+% NOTE(review): file is set_fields.m; MATLAB dispatches on the file name, not this internal name.
+% We should check if the arguments are valid...
+
+switch name
+ case 'table', K.table = val;
+ otherwise,
+ error(['invalid field name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/tabular_kernel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_kernel/tabular_kernel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function K = tabular_kernel(sz, table)
+% TABULAR_KERNEL Make a table-based local kernel (discrete potential)
+% K = tabular_kernel(sz, table)
+%
+% sz(i) is the number of values the i'th member of this kernel can have
+% table is an optional array of size sz[1] x sz[2] x... [default: random]
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ K = init_fields;
+ K = class(K, 'tabular_kernel');
+ return;
+elseif isa(sz, 'tabular_kernel')
+ % This might occur if we are copying an object.
+ K = sz;
+ return;
+end
+K = init_fields;
+
+if nargin < 2, table = myrand(sz); end % default: unnormalised random entries in [0,1]
+
+K.sz = sz;
+K.table = table;
+
+K = class(K, 'tabular_kernel');
+
+
+%%%%%%%
+
+
+function K = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+K.sz = []; % sizes of the kernel members
+K.table = []; % the potential table
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tabular_utility_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tabular_utility_node
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/convert_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/convert_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function pot = convert_to_pot(CPD, pot_type, domain, evidence)
+% CONVERT_TO_POT Convert a tabular utility node to one or more potentials
+% pot = convert_to_pot(CPD, pot_type, domain, evidence)
+
+switch pot_type
+ case 'u',
+ sz = [CPD.sizes 1]; % the utility node itself has size 1
+ pot = upot(domain, sz, 1*myones(sz), myreshape(CPD.T, sz)); % probability 1 everywhere, utility CPD.T
+ otherwise,
+ error(['can''t convert a utility node to a ' pot_type ' potential']);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(CPD)
+% DISPLAY Print a summary of the object (invoked when the name is typed with no semicolon)
+disp('tabular utility node object');
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/tabular_utility_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tabular_utility_node/tabular_utility_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+function CPD = tabular_utility_node(bnet, node, T)
+% TABULAR_UTILITY_NODE Represent a utility function as a table
+% CPD = tabular_utility_node(bnet, node, T)
+%
+% node is the number of a node in this equivalence class.
+% T is an optional argument (same shape as the CPT in tabular_CPD, but missing the last (child)
+% dimension). By default, entries in T are chosen u.a.r. from 0:1 (using 'rand').
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ clamp = 0; % NOTE(review): unused
+ CPD = class(CPD, 'tabular_utility_node');
+ return;
+elseif isa(bnet, 'tabular_utility_node')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, node);
+sz = ns(ps); % the utility table is indexed by the parents only
+
+if nargin < 3
+ T = myrand(sz);
+else
+ T = myreshape(T, sz);
+end
+
+CPD.T = T;
+CPD.sizes = sz;
+
+CPD = class(CPD, 'tabular_utility_node');
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.T = []; % the utility table, indexed by parent values
+CPD.sizes = []; % the parent sizes
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/display.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/evaluate_tree_performance.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/get_field.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/learn_params.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/readme.txt/1.1.1.1/Wed May 29 15:59:54 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tree_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/@tree_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(CPD)
+% DISPLAY Print a summary of the object (invoked when the name is typed with no semicolon)
+disp('dtree_CPD object');
+disp(struct(CPD));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/evaluate_tree_performance.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/evaluate_tree_performance.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,82 @@
+function [score,outputs] = evaluate(CPD, fam, data, ns, cnodes)
+% EVALUATE Evaluate the performance of the classification/regression tree on given complete data
+% score = evaluate(CPD, fam, data, ns, cnodes)
+%
+% fam(i) is the node id of the i-th node in the family of nodes, self node is the last one
+% data(i,m) is the value of node i in case m (can be cell array).
+% ns(i) is the node size for the i-th node in the whole bnet
+% cnodes(i) is the node id for the i-th continuous node in the whole bnet
+%
+% Output
+% score is the classification accuracy (for classification)
+% or mean square deviation (for regression)
+% here for every case we use the mean value at the tree leaf node as its predicted value
+% outputs(i) is the predicted output value for case i
+%
+% Author: yimin.zhang@intel.com
+% Last updated: Jan. 19, 2002
+
+
+if iscell(data)
+ local_data = cell2num(data(fam,:));
+else
+ local_data = data(fam, :);
+end
+
+%get local node sizes and node types
+node_sizes = ns(fam);
+node_types = zeros(1,size(ns,2)); %all nodes are discrete
+node_types(cnodes)=1;
+node_types=node_types(fam); % 0 = discrete, 1 = continuous, per family member
+
+fam_size=size(fam,2);
+output_type = node_types(fam_size); % type of the self (output) node
+
+num_cases=size(local_data,2);
+total_error=0;
+
+outputs=zeros(1,num_cases);
+for i=1:num_cases
+ %classify one case using the tree
+ cur_node=CPD.tree.root; % at the root node of the tree
+ while (1)
+ if (CPD.tree.nodes(cur_node).is_leaf==1)
+ if (output_type==0) %output is discrete
+ %use the class with max probability as the output
+ [maxvalue,class_id]=max(CPD.tree.nodes(cur_node).probs);
+ outputs(i)=class_id;
+ if (class_id~=local_data(fam_size,i))
+ total_error=total_error+1;
+ end
+ else %output is continuous
+ %use the mean as the value
+ outputs(i)=CPD.tree.nodes(cur_node).mean;
+ cur_deviation = CPD.tree.nodes(cur_node).mean-local_data(fam_size,i);
+ total_error=total_error+cur_deviation*cur_deviation;
+ end
+ break;
+ end
+ cur_attr = CPD.tree.nodes(cur_node).split_id;
+ attr_val = local_data(cur_attr,i);
+ if (node_types(cur_attr)==0) %discrete attribute
+ % goto the attr_val -th child
+ cur_node = CPD.tree.nodes(cur_node).children(attr_val);
+ else % continuous attribute: branch on the split threshold
+ if (attr_val <= CPD.tree.nodes(cur_node).split_threshhold)
+ cur_node = CPD.tree.nodes(cur_node).children(1);
+ else
+ cur_node = CPD.tree.nodes(cur_node).children(2);
+ end
+ end
+ if (cur_node > CPD.tree.num_node)
+ fprintf('Fatal error: Tree structure corrupted.\n');
+ return;
+ end
+ end
+ %update the classification error number
+end
+if (output_type==0)
+ score=1-total_error/num_cases; % classification accuracy
+else
+ score=total_error/num_cases; % mean squared deviation
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function val = get_params(CPD, name)
+% GET_PARAMS Get the parameters (fields) for a tree_CPD object
+% val = get_params(CPD, name)
+%
+% The following fields can be accessed
+%
+% cpt - the CPT
+% tree - the decision/regression tree structure
+% e.g., CPT = get_params(CPD, 'cpt')
+% NOTE(review): file is get_field.m; MATLAB dispatches on the file name, not this internal name.
+switch name
+ case 'cpt', val = CPD.CPT;
+ case 'tree', val = CPD.tree;
+ otherwise,
+ error(['invalid argument name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/learn_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/learn_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,642 @@
+function CPD = learn_params(CPD, fam, data, ns, cnodes, varargin)
+% LEARN_PARAMS Construct classification/regression tree given complete data
+% CPD = learn_params(CPD, fam, data, ns, cnodes)
+%
+% fam(i) is the node id of the i-th node in the family of nodes, self node is the last one
+% data(i,m) is the value of node i in case m (can be cell array).
+% ns(i) is the node size for the i-th node in the whole bnet
+% cnodes(i) is the node id for the i-th continuous node in the whole bnet
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% stop_cases: for early stop (pruning). A node is not split if it has less than k cases. default is 0.
+% min_gain: for early stop (pruning).
+% For discrete output: A node is not split when the gain of best split is less than min_gain. default is 0.
+%               For continuous (cts) output: A node is not split when the gain of best split is less than min_gain*score(root)
+% (we denote it cts_min_gain). default is 0.006
+% %%%%%%%%%%%%%%%%%%%Structure definition of dtree_CPD.tree%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% tree.num_node the last position in tree.nodes array for adding new nodes,
+% it is not always same to number of nodes in a tree, because some position in the
+% tree.nodes array can be set to unused (e.g. in tree pruning)
+% tree.nodes is the array of nodes in the tree plus some unused nodes.
+% tree.nodes(1) is the root for the tree.
+%
+% Below is the attributes for each node
+% tree.nodes(i).used; % flag this node is used (0 means node not used, it can be removed from tree to save memory)
+% tree.nodes(i).is_leaf; % if 1 means this node is a leaf, if 0 not a leaf.
+% tree.nodes(i).children; % children(i) is the node number in tree.nodes array for the i-th child node
+% tree.nodes(i).split_id; % the attribute id used to split this node
+% tree.nodes(i).split_threshhold; % the threshhold for continuous attribute to split this node
+% %%%%%attributes specially for classification tree (discrete output)
+% tree.nodes(i).probs % probs(i) is the prob for i-th value of class node
+% % For three output class, the probs = [0.9 0.1 0.0] means the probability of
+% % class 1 is 0.9, for class 2 is 0.1, for class 3 is 0.0.
+% %%%%%attributes specially for regression tree (continuous output)
+% tree.nodes(i).mean % mean output value for this node
+% tree.nodes(i).std % standard deviation for output values in this node
+%
+% Author: yimin.zhang@intel.com
+% Last updated: Jan. 19, 2002
+
+% Want list:
+% (1) more efficient for cts attributes: get the values of cts attributes at first (the beginning of build_tree function), then doing bi_search in finding threshhold
+% (2) pruning classification tree using Pessimistic Error Pruning
+% (3) bi_search for strings (used for transform data to BNT format)
+
+global tree %tree must be global so that it can be accessed in recursive slitting function
+global cts_min_gain
+tree=[]; % clear the tree
+tree.num_node=0;
+cts_min_gain=0;
+
+stop_cases=0;
+min_gain=0;
+
+args = varargin;
+nargs = length(args);
+if (nargs>0)
+ if isstr(args{1})
+ for i=1:2:nargs
+ switch args{i},
+ case 'stop_cases', stop_cases = args{i+1};
+ case 'min_gain', min_gain = args{i+1};
+ end
+ end
+ else
+ error(['error in input parameters']);
+ end
+end
+
+if iscell(data)
+ local_data = cell2num(data(fam,:));
+else
+ local_data = data(fam, :);
+end
+%counts = compute_counts(local_data, CPD.sizes);
+%CPD.CPT = mk_stochastic(counts + CPD.prior); % bug fix 11/5/01
+node_types = zeros(1,size(ns,2)); %all nodes are disrete
+node_types(cnodes)=1;
+%make the data be BNT compliant (values for discrete nodes are from 1-n, here n is the node size)
+%trans_data=transform_data(local_data,'tmp.dat',[]); %here no cts nodes
+
+build_dtree (CPD, local_data, ns(fam), node_types(fam),stop_cases,min_gain);
+%CPD.tree=copy_tree(tree);
+CPD.tree=tree; %copy the tree constructed to CPD
+
+
+function new_tree = copy_tree(tree)
+% copy the tree to new_tree
+new_tree.num_node=tree.num_node;
+new_tree.root = tree.root;
+for i=1:tree.num_node
+ new_tree.nodes(i)=tree.nodes(i);
+end
+
+
+function build_dtree (CPD, fam_ev, node_sizes, node_types,stop_cases,min_gain)
+global tree
+global cts_min_gain
+
+tree.num_node=0; %the current number of nodes in the tree
+tree.root=1;
+
+T = 1:size(fam_ev,2) ; %all cases
+candidate_attrs = 1:(size(node_sizes,2)-1); %all attributes
+node_id=1; %the root node
+lastnode=size(node_sizes,2); %the last element in all nodes is the dependent variable (category node)
+num_cat=node_sizes(lastnode);
+
+% get minimum gain for cts output (used in stop splitting)
+if (node_types(size(fam_ev,1))==1) %cts output
+ N = size(fam_ev,2);
+ output_id = size(fam_ev,1);
+ cases_T = fam_ev(output_id,:); %get all the output value for cases T
+ std_T = std(cases_T);
+ avg_y_T = mean(cases_T);
+ sqr_T = cases_T - avg_y_T;
+ cts_min_gain = min_gain*(sum(sqr_T.*sqr_T)/N); % min_gain * (R(root) = 1/N * SUM(y-avg_y)^2)
+end
+
+split_dtree (CPD, fam_ev, node_sizes, node_types, stop_cases,min_gain, T, candidate_attrs, num_cat);
+
+
+
+% pruning method
+% (1) Restrictions on minimum node size: A node is not split if it has fewer than k cases.
+% (2) Threshholds on impurity: a threshhold is imposed on the splitting test score. Threshhold can be
+% imposed on local goodness measure (the gain_ratio of a node) or global goodness.
+% (3) Minimum Error Pruning (MEP), (no need pruning set)
+% Prune if static error<=backed-up error
+% Static error at node v: e(v) = (Nc + 1)/(N+k) (laplace estimate, prior for each class equal)
+% here N is # of all examples, Nc is # of majority class examples, k is number of classes
+% Backed-up error at node v: (Ti is the i-th subtree root)
+% E(T) = Sum_1_to_n(pi*e(Ti))
+% (4) Pessimistic Error Pruning (PEP), used in Quilan C4.5 (no need pruning set, efficient because of pruning top-down)
+% Probability of error (apparent error rate)
+% q = (N-Nc+0.5)/N
+% where N=#examples, Nc=#examples in majority class
+% Error of a node v (if pruned) q(v)= (Nv- Nc,v + 0.5)/Nv
+% Error of a subtree q(T)= Sum_of_l_leaves(Nl - Nc,l + 0.5)/Sum_of_l_leaves(Nl)
+% Prune if q(v)<=q(T)
+%
+% Implementation status:
+% (1)(2) has been implemented as the input parameters of learn_params.
+% (4) is implemented in this function
+function pruning(fam_ev,node_sizes,node_types)
+% PRUNING prune the constructed tree using PEP
+% pruning(fam_ev,node_sizes,node_types)
+%
+% fam_ev(i,j) is the value of attribute i in j-th training cases (for whole tree), the last row is for the class label (self_ev)
+% node_sizes(i) is the node size for the i-th node in the family
+% node_types(i) is the node type for the i-th node in the family, 0 for discrete node, 1 for continuous node
+% the global parameter 'tree' is for storing the input tree and the pruned tree
+
+
+function split_T = split_cases(fam_ev,node_sizes,node_types,T,node_i, threshhold)
+% SPLIT_CASES split the cases T according to values of node_i in the family
+% split_T = split_cases(fam_ev,node_sizes,node_types,T,node_i)
+%
+% fam_ev(i,j) is the value of attribute i in j-th training cases (for whole tree), the last row is for the class label (self_ev)
+% node_sizes(i) is the node size for the i-th node in the family
+% node_types(i) is the node type for the i-th node in the family, 0 for discrete node, 1 for continuous node
+% node_i is the attribute we need to split
+
+if (node_types(node_i)==0) %discrete attribute
+ %init the subsets of T
+ split_T = cell(1,node_sizes(node_i)); %T will be separated into |node_size of i| subsets according to different values of node i
+ for i=1:node_sizes(node_i) % here we assume that the value of an attribute is 1:node_size
+ split_T{i}=zeros(1,0);
+ end
+
+ size_t = size(T,2);
+ for i=1:size_t
+ case_id = T(i);
+ %put this case into one subset of split_T according to its value for node_i
+ value = fam_ev(node_i,case_id);
+ pos = size(split_T{value},2)+1;
+ split_T{value}(pos)=case_id; % here assumes the value of an attribute is 1:node_size
+ end
+else %continuous attribute
+ %init the subsets of T
+ split_T = cell(1,2); %T will be separated into 2 subsets (<=threshhold) (>threshhold)
+ for i=1:2
+ split_T{i}=zeros(1,0);
+ end
+
+ size_t = size(T,2);
+ for i=1:size_t
+ case_id = T(i);
+ %put this case into one subset of split_T according to its value for node_i
+ value = fam_ev(node_i,case_id);
+ subset_num=1;
+ if (value>threshhold)
+ subset_num=2;
+ end
+ pos = size(split_T{subset_num},2)+1;
+ split_T{subset_num}(pos)=case_id;
+ end
+end
+
+
+
+function new_node = split_dtree (CPD, fam_ev, node_sizes, node_types, stop_cases, min_gain, T, candidate_attrs, num_cat)
+% SPLIT_TREE Split the tree at node node_id with cases T (actually it is just indexes to family evidences).
+% new_node = split_dtree (fam_ev, node_sizes, node_types, T, node_id, num_cat, method)
+%
+% fam_ev(i,j) is the value of attribute i in j-th training cases (for whole tree), the last row is for the class label (self_ev)
+% node_sizes{i} is the node size for the i-th node in the family
+% node_types{i} is the node type for the i-th node in the family, 0 for discrete node, 1 for continuous node
+% stop_cases is the threshold of number of cases to stop splitting
+% min_gain is the minimum gain need to split a node
+% T(i) is the index of i-th cases in current decision tree node, we need split it further
+% candidate_attrs(i) the node id for the i-th attribute that still need to be considered as split attribute
+%%%%% node_id is the index of current node considered for a split
+% num_cat is the number of output categories for the decision tree
+% output:
+% new_node is the new node created
+global tree
+global cts_min_gain
+
+size_fam = size(fam_ev,1); %number of family size
+output_type = node_types(size_fam); %the type of output for the tree (0 is discrete, 1 is continuous)
+size_attrs = size(candidate_attrs,2); %number of candidate attributes
+size_t = size(T,2); %number of training cases in this tree node
+
+%(1)computeFrequenceyForEachClass(T)
+if (output_type==0) %discrete output
+ class_freqs = zeros(1,num_cat);
+ for i=1:size_t
+ case_id = T(i);
+ case_class = fam_ev(size_fam,case_id); %get the class label for this case
+ class_freqs(case_class)=class_freqs(case_class)+1;
+ end
+else %cts output
+ N = size(fam_ev,2);
+ cases_T = fam_ev(size(fam_ev,1),T); %get the output value for cases T
+ std_T = std(cases_T);
+end
+
+%(2) if OneClass (for discrete output) or same output value (for cts output) or Class With #examples < stop_cases
+% return a leaf;
+% create a decision node N;
+
+% get majority class in this node
+if (output_type == 0)
+ top1_class = 0; %the class with the largest number of cases
+ top1_class_cases = 0; %the number of cases in top1_class
+ [top1_class_cases,top1_class]=max(class_freqs);
+end
+
+if (size_t==0) %impossble
+ new_node=-1;
+ fprintf('Fatal error: please contact the author. \n');
+ return;
+end
+
+% stop splitting if needed
+ %for discrete output: one class
+ %for cts output, all output value in cases are same
+ %cases too little
+if ( (output_type==0 & top1_class_cases == size_t) | (output_type==1 & std_T == 0) | (size_t < stop_cases))
+ %create one new leaf node
+ tree.num_node=tree.num_node+1;
+ tree.nodes(tree.num_node).used=1; %flag this node is used (0 means node not used, it will be removed from tree at last to save memory)
+ tree.nodes(tree.num_node).is_leaf=1;
+ tree.nodes(tree.num_node).children=[];
+ tree.nodes(tree.num_node).split_id=0; %the attribute(parent) id to split this tree node
+ tree.nodes(tree.num_node).split_threshhold=0;
+ if (output_type==0)
+ tree.nodes(tree.num_node).probs=class_freqs/size_t; %the prob for each value of class node
+
+ % tree.nodes(tree.num_node).probs=zeros(1,num_cat); %the prob for each value of class node
+ % tree.nodes(tree.num_node).probs(top1_class)=1; %use the majority class of parent node, like for binary class,
+ %and majority is class 2, then the CPT is [0 1]
+ %we may need to use prior to do smoothing, to get [0.001 0.999]
+ tree.nodes(tree.num_node).error.self_error=1-top1_class_cases/size_t; %the classfication error in this tree node when use default class
+ tree.nodes(tree.num_node).error.all_error=1-top1_class_cases/size_t; %no total classfication error in this tree node and its subtree
+ tree.nodes(tree.num_node).error.all_error_num=size_t - top1_class_cases;
+ fprintf('Create leaf node(onecla) %d. Class %d Cases %d Error %d \n',tree.num_node, top1_class, size_t, size_t - top1_class_cases );
+ else
+ avg_y_T = mean(cases_T);
+ tree.nodes(tree.num_node).mean = avg_y_T;
+ tree.nodes(tree.num_node).std = std_T;
+ fprintf('Create leaf node(samevalue) %d. Mean %8.4f Std %8.4f Cases %d \n',tree.num_node, avg_y_T, std_T, size_t);
+ end
+ new_node = tree.num_node;
+ return;
+end
+
+%create one new node
+tree.num_node=tree.num_node+1;
+tree.nodes(tree.num_node).used=1; %flag this node is used (0 means node not used, it will be removed from tree at last to save memory)
+tree.nodes(tree.num_node).is_leaf=1;
+tree.nodes(tree.num_node).children=[];
+tree.nodes(tree.num_node).split_id=0;
+tree.nodes(tree.num_node).split_threshhold=0;
+if (output_type==0)
+ tree.nodes(tree.num_node).error.self_error=1-top1_class_cases/size_t;
+ tree.nodes(tree.num_node).error.all_error=0;
+ tree.nodes(tree.num_node).error.all_error_num=0;
+else
+ avg_y_T = mean(cases_T);
+ tree.nodes(tree.num_node).mean = avg_y_T;
+ tree.nodes(tree.num_node).std = std_T;
+end
+new_node = tree.num_node;
+
+%Stop splitting if no attributes left in this node
+if (size_attrs==0)
+ if (output_type==0)
+ tree.nodes(tree.num_node).probs=class_freqs/size_t; %the prob for each value of class node
+ tree.nodes(tree.num_node).error.all_error=1-top1_class_cases/size_t;
+ tree.nodes(tree.num_node).error.all_error_num=size_t - top1_class_cases;
+ fprintf('Create leaf node(noattr) %d. Class %d Cases %d Error %d \n',tree.num_node, top1_class, size_t, size_t - top1_class_cases );
+ else
+ fprintf('Create leaf node(noattr) %d. Mean %8.4f Std %8.4f Cases %d \n',tree.num_node, avg_y_T, std_T, size_t);
+ end
+ return;
+end
+
+
+%(3) for each attribute A
+% ComputeGain(A);
+max_gain=0; %the max gain score (for discrete information gain or gain ration, for cts node the R(T))
+best_attr=0; %the attribute with the max_gain
+best_split = []; %the split of T according to the value of best_attr
+cur_best_threshhold = 0; %the threshhold for split continuous attribute
+best_threshhold=0;
+
+% compute Info(T) (for discrete output)
+if (output_type == 0)
+ class_split_T = split_cases(fam_ev,node_sizes,node_types,T,size(fam_ev,1),0); %split cases according to class
+ info_T = compute_info (fam_ev, T, class_split_T);
+else % compute R(T) (for cts output)
+% N = size(fam_ev,2);
+% cases_T = fam_ev(size(fam_ev,1),T); %get the output value for cases T
+% std_T = std(cases_T);
+% avg_y_T = mean(cases_T);
+ sqr_T = cases_T - avg_y_T;
+ R_T = sum(sqr_T.*sqr_T)/N; % get R(T) = 1/N * SUM(y-avg_y)^2
+ info_T = R_T;
+end
+
+for i=1:(size_fam-1)
+ if (myismember(i,candidate_attrs)) %if this attribute still in the candidate attribute set
+ if (node_types(i)==0) %discrete attibute
+ split_T = split_cases(fam_ev,node_sizes,node_types,T,i,0); %split cases according to value of attribute i
+ % For cts output, we compute the least square gain.
+ % For discrete output, we compute gain ratio
+ cur_gain = compute_gain(fam_ev,node_sizes,node_types,T,info_T,i,split_T,0,output_type); %gain ratio
+ else %cts attribute
+ %get the values of this attribute
+ ev = fam_ev(:,T);
+ values = ev(i,:);
+ sort_v = sort(values);
+ %remove the duplicate values in sort_v
+ v_set = unique(sort_v);
+ best_gain = 0;
+ best_threshhold = 0;
+ best_split1 = [];
+
+ %find the best split for this cts attribute
+      % see "Quinlan 96: Improved Use of Continuous Attributes in C4.5"
+ for j=1:(size(v_set,2)-1)
+ mid_v = (v_set(j)+v_set(j+1))/2;
+ split_T = split_cases(fam_ev,node_sizes,node_types,T,i,mid_v); %split cases according to value of attribute i (<=mid_v)
+ % For cts output, we compute the least square gain.
+        % For discrete output, we use Quinlan 96: use information gain instead of gain ratio to select threshhold
+ cur_gain = compute_gain(fam_ev,node_sizes,node_types,T,info_T,i,split_T,1,output_type);
+ %if (i==6)
+ % fprintf('gain %8.5f threshhold %6.3f spliting %d\n', cur_gain, mid_v, size(split_T{1},2));
+ %end
+
+ if (best_gain < cur_gain)
+ best_gain = cur_gain;
+ best_threshhold = mid_v;
+ %best_split1 = split_T; %here we need to copy array, not good!!! (maybe we can compute after we get best_attr
+ end
+ end
+ %recalculate the gain_ratio of the best_threshhold
+ split_T = split_cases(fam_ev,node_sizes,node_types,T,i,best_threshhold);
+ best_gain = compute_gain(fam_ev,node_sizes,node_types,T,info_T,i,split_T,0,output_type); %gain_ratio
+ if (output_type==0) %for discrete output
+ cur_gain = best_gain-log2(size(v_set,2)-1)/size_t; % Quilan 96: use the gain_ratio-log2(N-1)/|D| as the gain of this attr
+ else %for cts output
+ cur_gain = best_gain;
+ end
+ end
+
+ if (max_gain < cur_gain)
+ max_gain = cur_gain;
+ best_attr = i;
+ cur_best_threshhold=best_threshhold; %save the threshhold
+ %best_split = split_T; %here we need to copy array, not good!!! So we will recalculate in below line 313
+ end
+ end
+end
+
+% stop splitting if gain is too small
+if (max_gain==0 | (output_type==0 & max_gain < min_gain) | (output_type==1 & max_gain < cts_min_gain))
+ if (output_type==0)
+ tree.nodes(tree.num_node).probs=class_freqs/size_t; %the prob for each value of class node
+ tree.nodes(tree.num_node).error.all_error=1-top1_class_cases/size_t;
+ tree.nodes(tree.num_node).error.all_error_num=size_t - top1_class_cases;
+ fprintf('Create leaf node(nogain) %d. Class %d Cases %d Error %d \n',tree.num_node, top1_class, size_t, size_t - top1_class_cases );
+ else
+ fprintf('Create leaf node(nogain) %d. Mean %8.4f Std %8.4f Cases %d \n',tree.num_node, avg_y_T, std_T, size_t);
+ end
+ return;
+end
+
+%get the split of cases according to the best split attribute
+if (node_types(best_attr)==0) %discrete attibute
+ best_split = split_cases(fam_ev,node_sizes,node_types,T,best_attr,0);
+else
+ best_split = split_cases(fam_ev,node_sizes,node_types,T,best_attr,cur_best_threshhold);
+end
+
+%(4) best_attr = AttributeWithBestGain;
+%(5) if best_attr is continuous ???? why need this? maybe the value in the decision tree must appeared in data
+% find threshhold in all cases that <= max_V
+% change the split of T
+tree.nodes(tree.num_node).split_id=best_attr;
+tree.nodes(tree.num_node).split_threshhold=cur_best_threshhold; %for cts attribute only
+
+%note: the threshold readjustment below is a linear search, so it is slow. A better method is described in the paper "Efficient C4.5"
+%if (output_type==0)
+if (node_types(best_attr)==1) %is a continuous attribute
+ %find the value that approximate best_threshhold from below (the largest that <= best_threshhold)
+ best_value=0;
+ for i=1:size(fam_ev,2) %note: need to search in all cases for all tree, not just in cases for this node
+ val = fam_ev(best_attr,i);
+ if (val <= cur_best_threshhold & val > best_value) %val is more clear to best_threshhold
+ best_value=val;
+ end
+ end
+ tree.nodes(tree.num_node).split_threshhold=best_value; %for cts attribute only
+end
+%end
+
+if (output_type == 0)
+ fprintf('Create node %d split at %d gain %8.4f Th %d. Class %d Cases %d Error %d \n',tree.num_node, best_attr, max_gain, tree.nodes(tree.num_node).split_threshhold, top1_class, size_t, size_t - top1_class_cases );
+else
+ fprintf('Create node %d split at %d gain %8.4f Th %d. Mean %8.4f Cases %d\n',tree.num_node, best_attr, max_gain, tree.nodes(tree.num_node).split_threshhold, avg_y_T, size_t );
+end
+
+%(6) Foreach T' in the split_T
+% if T' is Empty
+% Child of node_id is a leaf
+% else
+% Child of node_id = split_tree (T')
+tree.nodes(new_node).is_leaf=0; %because this node will be split, it is not leaf now
+for i=1:size(best_split,2)
+ if (size(best_split{i},2)==0) %T(i) is empty
+ %create one new leaf node
+ tree.num_node=tree.num_node+1;
+ tree.nodes(tree.num_node).used=1; %flag this node is used (0 means node not used, it will be removed from tree at last to save memory)
+ tree.nodes(tree.num_node).is_leaf=1;
+ tree.nodes(tree.num_node).children=[];
+ tree.nodes(tree.num_node).split_id=0;
+ tree.nodes(tree.num_node).split_threshhold=0;
+ if (output_type == 0)
+ tree.nodes(tree.num_node).probs=zeros(1,num_cat); %the prob for each value of class node
+ tree.nodes(tree.num_node).probs(top1_class)=1; %use the majority class of parent node, like for binary class,
+ %and majority is class 2, then the CPT is [0 1]
+ %we may need to use prior to do smoothing, to get [0.001 0.999]
+ tree.nodes(tree.num_node).error.self_error=0;
+ tree.nodes(tree.num_node).error.all_error=0;
+ tree.nodes(tree.num_node).error.all_error_num=0;
+ else
+ tree.nodes(tree.num_node).mean = avg_y_T; %just use parent node's mean value
+ tree.nodes(tree.num_node).std = std_T;
+ end
+ %add the new leaf node to parents
+ num_children=size(tree.nodes(new_node).children,2);
+ tree.nodes(new_node).children(num_children+1)=tree.num_node;
+ if (output_type==0)
+ fprintf('Create leaf node(nullset) %d. %d-th child of Father %d Class %d\n',tree.num_node, i, new_node, top1_class );
+ else
+ fprintf('Create leaf node(nullset) %d. %d-th child of Father %d \n',tree.num_node, i, new_node );
+ end
+
+ else
+ if (node_types(best_attr)==0) % if attr is discrete, it should be removed from the candidate set
+ new_candidate_attrs = mysetdiff(candidate_attrs,[best_attr]);
+ else
+ new_candidate_attrs = candidate_attrs;
+ end
+ new_sub_node = split_dtree (CPD, fam_ev, node_sizes, node_types, stop_cases, min_gain, best_split{i}, new_candidate_attrs, num_cat);
+ %tree.nodes(parent_id).error.all_error += tree.nodes(new_sub_node).error.all_error;
+ fprintf('Add subtree node %d to %d. #nodes %d\n',new_sub_node,new_node, tree.num_node );
+
+% tree.nodes(new_node).error.all_error_num = tree.nodes(new_node).error.all_error_num + tree.nodes(new_sub_node).error.all_error_num;
+ %add the new leaf node to parents
+ num_children=size(tree.nodes(new_node).children,2);
+ tree.nodes(new_node).children(num_children+1)=new_sub_node;
+ end
+end
+
+%(7) Compute errors of N; for doing pruning
+% get the total error for the subtree
+if (output_type==0)
+ tree.nodes(new_node).error.all_error=tree.nodes(new_node).error.all_error_num/size_t;
+end
+%doing pruning, but doing here is not so efficient, because it is bottom up.
+%if tree.nodes()
+%after doing pruning, need to update the all_error to self_error
+
+%(8) Return N
+
+
+
+
+%(1) For discrete output, we use GainRatio defined as below
+% Gain(X,T)
+% GainRatio(X,T) = ----------
+% SplitInfo(X,T)
+% where
+% Gain(X,T) = Info(T) - Info(X,T)
+% |Ti|
+% Info(X,T) = Sum for i from 1 to n of ( ---- * Info(Ti))
+% |T|
+
+% SplitInfo(D,T) is the information due to the split of T on the basis
+% of the value of the categorical attribute D. Thus SplitInfo(D,T) is
+% I(|T1|/|T|, |T2|/|T|, .., |Tm|/|T|)
+% where {T1, T2, .. Tm} is the partition of T induced by the value of D.
+
+% Definition of Info(Ti)
+% If a set T of records is partitioned into disjoint exhaustive classes C1, C2, .., Ck on the basis of the
+% value of the categorical attribute, then the information needed to identify the class of an element of T
+% is Info(T) = I(P), where P is the probability distribution of the partition (C1, C2, .., Ck):
+% P = (|C1|/|T|, |C2|/|T|, ..., |Ck|/|T|)
+% Here I(P) is defined as
+% I(P) = -(p1*log(p1) + p2*log(p2) + .. + pn*log(pn))
+%
+%(2) For continuous output (regression tree), we use the least squares score (adapted from Leo Breiman's book "Classification and regression trees", page 231).
+%    The original supports only binary splits; we further extend it to permit multiple-child splits.
+%
+%      Delta_R = R(T) - Sum for all child nodes Ti (R(Ti))
+% Where R(Ti)= 1/N * Sum for all cases i in node Ti ((yi - avg_y(Ti))^2)
+% here N is the number of all training cases for construct the regression tree
+% avg_y(Ti) is the average value for output variable for the cases in node Ti
+
+function gain_score = compute_gain (fam_ev, node_sizes, node_types, T, info_T, attr_id, split_T, score_type, output_type)
+% COMPUTE_GAIN Compute the score for the split of cases T using attribute attr_id
+% gain_score = compute_gain (fam_ev, T, attr_id, node_size, method)
+%
+% fam_ev(i,j) is the value of attribute i in j-th training cases, the last row is for the class label (self_ev)
+% T(i) is the index of i-th cases in current decision tree node, we need split it further
+% attr_id is the index of current node considered for a split
+% split_T{i} is the i_th subset in partition of cases T according to the value of attribute attr_id
+% score_type if 0, is gain ratio, 1 is information gain (only apply to discrete output)
+% node_size(i) the node size of i-th node in the family
+% output_type: 0 means discrete output, 1 means continuous output.
+gain_score=0;
+% ***********for DISCRETE output*******************************************************
+if (output_type == 0)
+ % compute Info(T)
+ total_cnt = size(T,2);
+ if (total_cnt==0)
+ return;
+ end;
+ %class_split_T = split_cases(fam_ev,node_sizes,node_types,T,size(fam_ev,1),0); %split cases according to class
+ %info_T = compute_info (fam_ev, T, class_split_T);
+
+ % compute Info(X,T)
+ num_class = size(split_T,2);
+ subset_sizes = zeros(1,num_class);
+ info_ti = zeros(1,num_class);
+ for i=1:num_class
+ subset_sizes(i)=size(split_T{i},2);
+ if (subset_sizes(i)~=0)
+ class_split_Ti = split_cases(fam_ev,node_sizes,node_types,split_T{i},size(fam_ev,1),0); %split cases according to class
+ info_ti(i) = compute_info(fam_ev, split_T{i}, class_split_Ti);
+ end
+ end
+ ti_ratios = subset_sizes/total_cnt; %get the |Ti|/|T|
+ info_X_T = sum(ti_ratios.*info_ti);
+
+ %get Gain(X,T)
+ gain_X_T = info_T - info_X_T;
+
+ if (score_type == 1) %information gain
+ gain_score=gain_X_T;
+ return;
+ end
+ %compute the SplitInfo(X,T) //is this also for cts attr, only split into two subsets
+ splitinfo_T = compute_info (fam_ev, T, split_T);
+ if (splitinfo_T~=0)
+ gain_score = gain_X_T/splitinfo_T;
+ end
+
+% ************for continuous output**************************************************
+else
+ N = size(fam_ev,2);
+
+ % compute R(Ti)
+ num_class = size(split_T,2);
+ R_Ti = zeros(1,num_class);
+ for i=1:num_class
+ if (size(split_T{i},2)~=0)
+ cases_T = fam_ev(size(fam_ev,1),split_T{i});
+ avg_y_T = mean(cases_T);
+ sqr_T = cases_T - avg_y_T;
+ R_Ti(i) = sum(sqr_T.*sqr_T)/N; % get R(Ti) = 1/N * SUM(y-avg_y)^2
+ end
+ end
+ %delta_R = R(T) - SUM(R(Ti))
+ gain_score = info_T - sum(R_Ti);
+
+end
+
+
+% Definition of Info(Ti)
+% If a set T of records is partitioned into disjoint exhaustive classes C1, C2, .., Ck on the basis of the
+% value of the categorical attribute, then the information needed to identify the class of an element of T
+% is Info(T) = I(P), where P is the probability distribution of the partition (C1, C2, .., Ck):
+% P = (|C1|/|T|, |C2|/|T|, ..., |Ck|/|T|)
+% Here I(P) is defined as
+% I(P) = -(p1*log(p1) + p2*log(p2) + .. + pn*log(pn))
+function info = compute_info (fam_ev, T, split_T)
+% COMPUTE_INFO compute the information for the split of T into split_T
+% info = compute_info (fam_ev, T, split_T)
+
+total_cnt = size(T,2);
+num_class = size(split_T,2);
+subset_sizes = zeros(1,num_class);
+probs = zeros(1,num_class);
+log_probs = zeros(1,num_class);
+for i=1:num_class
+ subset_sizes(i)=size(split_T{i},2);
+end
+
+probs = subset_sizes/total_cnt;
+%log_probs = log2(probs); % if probs(i)=0, the log2(probs(i)) will be Inf
+for i=1:size(probs,2)
+ if (probs(i)~=0)
+ log_probs(i)=log2(probs(i));
+ end
+end
+
+info = sum(-(probs.*log_probs));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/readme.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/readme.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+Decision/regression tree CPD
+Author: Yimin Zhang yimin.zhang@intel.com
+21 Jan 2002
+
+
+See also Paul Bradley's Multisurface Method-Tree matlab code
+ http://www.cs.wisc.edu/~paulb/msmt/
+http://www.cs.wisc.edu/~olvi/uwmp/msmt.html
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function CPD = set_fields(CPD, varargin)
+% SET_PARAMS Set the parameters (fields) for a tree_CPD object
+% CPD = set_params(CPD, name/value pairs)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+%
+% CPT - the CPT
+% prior - the prior
+% clamped - 1 means don't adjust during EM
+%
+% e.g., CPD = set_params(CPD, 'CPT', 'rnd')
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'CPT',
+ if ischar(args{i+1})
+ switch args{i+1}
+ case 'unif', CPD.CPT = mk_stochastic(myones(CPD.sizes));
+ case 'rnd', CPD.CPT = mk_stochastic(myrand(CPD.sizes));
+ otherwise, error(['invalid type ' args{i+1}]);
+ end
+ elseif isscalarBNT(args{i+1})
+ p = args{i+1};
+ k = CPD.sizes(end);
+ % Bug fix by Hervé BOUTROUILLE 10/1/01
+ CPD.CPT = myreshape(sample_dirichlet(p*ones(1,k), prod(CPD.sizes(1:end-1)), CPD.sizes));
+ %CPD.CPT = myreshape(sample_dirichlet(p*ones(1,k), prod(CPD.sizes(1:end-1))), CPD.sizes);
+ else
+ CPD.CPT = myreshape(args{i+1}, CPD.sizes);
+ end
+
+ case 'prior',
+ if ischar(args{i+1}) & strcmp(args{i+1}, 'unif')
+ CPD.prior = myones(CPD.sizes);
+ elseif isscalarBNT(args{i+1})
+ CPD.prior = args{i+1} * normalise(myones(CPD.sizes));
+ else
+ CPD.prior = myreshape(args{i+1}, CPD.sizes);
+ end
+
+ %case 'clamped', CPD.clamped = strcmp(args{i+1}, 'yes');
+ %case 'clamped', CPD = set_clamped(CPD, strcmp(args{i+1}, 'yes'));
+ case 'clamped', CPD = set_clamped(CPD, args{i+1});
+
+ otherwise,
+ %error(['invalid argument name ' args{i}]);
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/tree_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@tree_CPD/tree_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function CPD = tree_CPD(varargin)
+%TREE_CPD Make a conditional prob. distrib. which is a decision/regression tree.
+%
+% CPD = tree_CPD() will create an empty tree.
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ clamp = 0;
+ CPD = class(CPD, 'tree_CPD', discrete_CPD(clamp, []));
+ return;
+elseif isa(varargin{1}, 'tree_CPD')
+ % This might occur if we are copying an object.
+ CPD = varargin{1};
+ return;
+end
+
+CPD = init_fields;
+
+
+clamped = 0;
+fam_sz = [];
+CPD = class(CPD, 'tree_CPD', discrete_CPD(clamped, fam_sz));
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+%init the decision tree set the root to null
+CPD.tree.num_node = 0;
+CPD.tree.root=1;
+CPD.tree.nodes=[];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/mk_isolated_tabular_CPD.m/1.1.1.1/Mon Jun 24 18:58:32 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+A D/@boolean_CPD////
+A D/@deterministic_CPD////
+A D/@discrete_CPD////
+A D/@gaussian_CPD////
+A D/@generic_CPD////
+A D/@gmux_CPD////
+A D/@hhmm2Q_CPD////
+A D/@hhmmF_CPD////
+A D/@hhmmQ_CPD////
+A D/@mlp_CPD////
+A D/@noisyor_CPD////
+A D/@root_CPD////
+A D/@softmax_CPD////
+A D/@tabular_CPD////
+A D/@tabular_decision_node////
+A D/@tabular_kernel////
+A D/@tabular_utility_node////
+A D/@tree_CPD////
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/linear_gaussian_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/log_marg_prob_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/update_params_complete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/Old/@linear_gaussian_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/linear_gaussian_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/linear_gaussian_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,87 @@
+function CPD = linear_gaussian_CPD(bnet, self, theta, sigma, theta0, n0, alpha0, beta0)
+% LINEAR_GAUSSIAN_CPD Make a linear Gaussian distrib.
+%
+% CPD = linear_gaussian_CPD(bnet, self, theta, lambda)
+% This defines the distribution P(Y|X) = N(y | theta'*x, sigma),
+% where y (self) is a scalar, theta is a regression vector, and sigma is the variance.
+% Pass in [] to generate a default random value for a parameter.
+%
+% CPD = linear_gaussian_CPD(bnet, self, [], [], theta0, n0, alpha0, beta0)
+% defines a Normal-Gamma prior over the parameters:
+% P(theta | lambda) = N(theta | theta0, n0*lambda)
+% P(lambda) = Gamma(lambda | alpha0, beta0)
+% where lambda = 1/sigma is the precision for y.
+% n0 is a precision matrix, beta0 is a scale factor.
+% Pass in [] to generate a default value for a hyperparameter.
+% theta and sigma will be set to their prior expected values.
+% See "Bayesian Theory", Bernardo and Smith (2000), p442.
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'linear_gaussian_CPD', generic_CPD(0));
+ return;
+elseif isa(bnet, 'linear_gaussian_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, self);
+d = sum(ns(ps));
+assert(ns(self)==1);
+
+
+if nargin < 5,
+ prior = [];
+ if isempty(theta), theta = randn(d, 1); end
+ if isempty(sigma), sigma = 1; end
+else
+
+ %if isempty(theta0), theta0 = zeros(d, 1); end
+ %if isempty(n0), n0 = 0.1*eye(d); end
+ %if isempty(alpha0), alpha0 = 0.1; end
+ %if isempty(beta0), beta0 = 0.1; end
+
+ % use non-informative priors
+ if isempty(theta0), theta0 = zeros(d, 1); end
+ if isempty(n0), n0 = 0.001*ones(d); end
+ if isempty(alpha0), alpha0 = -d/2 + 0.001; end
+ if isempty(beta0), beta0 = 0.001; end
+
+ prior.theta = theta0;
+ prior.n = n0;
+ prior.alpha = alpha0;
+ prior.beta = beta0;
+
+ % set params to their mean
+ theta = prior.theta;
+ %sigma = prior.beta/prior.alpha; % mean of Gamma is E[lambda] = alpha/beta
+end
+
+
+CPD.self = self;
+CPD.theta = theta;
+CPD.sigma = sigma;
+CPD.prior = prior;
+
+
+clamped = 0;
+CPD = class(CPD, 'linear_gaussian_CPD', generic_CPD(clamped));
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.theta = [];
+CPD.sigma = [];
+CPD.prior = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/log_marg_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/log_marg_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function L = log_marg_prob_node(CPD, self_ev, pev)
+% LOG_MARG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m)) for node i (linear_gaussian)
+% L = log_marg_prob_node(CPD, self_ev, pev)
+%
+% This differs from log_prob_node because we integrate out the parameters.
+% self_ev{m} is the evidence on this node in case m.
+% pev{i,m} is the evidence on the i'th parent in case m
+% We assume there is <= 1 case.
+
+ncases = length(self_ev);
+
+if ncases==0
+ L = 0;
+ return;
+elseif ncases==1
+ y = self_ev{1};
+ x = cat(1, pev{:}); % column vector
+ f = 1-x'*inv(x*x' + CPD.prior.n)*x;
+ alpha = CPD.prior.alpha;
+ L = log_student_pdf(y, x'*CPD.prior.theta, f*alpha/CPD.prior.beta, 2*alpha);
+else
+ error('can''t handle batch data');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/update_params_complete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@linear_gaussian_CPD/update_params_complete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function CPD = update_params_complete(CPD, self_ev, pev)
+% UPDATE_PARAMS_COMPLETE Bayesian parameter updating given completely observed data (linear_gaussian)
+% CPD = update_params_complete(CPD, self_ev, pev)
+%
+% self_ev{m} is the evidence on this node in case m.
+% pev{i,m} is the evidence on the i'th parent in case m
+%
+% We update the hyperparams and set the params to the mean of the posterior.
+
+y = cat(1, self_ev{:});
+X = cell2num(pev)';
+[N k] = size(X); % each row is a case
+
+n0 = CPD.prior.n;
+th0 = CPD.prior.theta;
+CPD.prior.theta = inv(n0 + X'*X)*(n0*th0 + X'*y);
+thn = CPD.prior.theta;
+CPD.prior.beta = CPD.prior.beta + 0.5*(y-X*thn)'*y + 0.5*(th0-thn)'*n0*th0;
+CPD.prior.alpha = CPD.prior.alpha + 0.5*N;
+CPD.prior.n = CPD.prior.n + X'*X;
+
+
+% set params to their mean
+CPD.theta = CPD.prior.theta;
+%CPD.sigma = CPD.prior.beta/CPD.prior.alpha; % mean of Gamma is E[lambda] = alpha/beta
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/log_marg_prob_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/root_gaussian_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/update_params_complete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/Old/@root_gaussian_CPD
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/log_marg_prob_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/log_marg_prob_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function L = log_marg_prob_node(CPD, self_ev, pev)
+% LOG_MARG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m)) for node i (root_gaussian)
+% L = log_marg_prob_node(CPD, self_ev, pev)
+%
+% This differs from log_prob_node because we integrate out the parameters.
+% self_ev{m} is the evidence on this node in case m.
+% pev{i,m} is the evidence on the i'th parent in case m (ignored).
+
+ncases = length(self_ev);
+
+if ncases==0
+ L = 0;
+ return;
+elseif ncases==1
+ x = cat(1, self_ev{:});
+ k = length(x);
+ n0 = CPD.prior.n;
+ mu = CPD.prior.mu;
+ alpha = CPD.prior.alpha;
+ beta = CPD.prior.beta;
+ gamma = 2*alpha - k + 1;
+ % Bernardo and Smith p441
+ L = log_student_pdf(x, mu, n0/(n0+1)*0.5*gamma*inv(beta), gamma);
+else
+ error('can''t handle batch data');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/root_gaussian_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/root_gaussian_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,74 @@
+function CPD = root_gaussian_CPD(bnet, self, mu, Sigma, mu0, n0, alpha0, beta0)
+% ROOT_GAUSSIAN_CPD Make an unconditional Gaussian distrib.
+%
+% CPD = root_gaussian_CPD(bnet, self, mu, Sigma)
+% This defines the distribution Y ~ N(mu, Sigma),
+% Pass in [] to generate a default random value for a parameter.
+%
+% CPD = root_gaussian_CPD(bnet, self, [], [], mu0, n0, alpha0, beta0)
+% defines a Normal-Wishart prior over the parameters:
+% P(mu | lambda) = N(mu | mu0, n0*lambda)
+% P(lambda) = Wishart(lambda | alpha0, beta0)
+% where lambda = inv(Sigma) is the precision matrix of mu.
+% n0 is a scale factor, beta0 is a precision matrix.
+% Pass in [] to generate a default value for a hyperparameter.
+% mu and Sigma will be set to their prior expected values.
+% See "Bayesian Theory", Bernardo and Smith (2000), p441.
+
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'root_gaussian_CPD', generic_CPD(0));
+ return;
+elseif isa(bnet, 'root_gaussian_CPD')
+ % This might occur if we are copying an object.
+ CPD = bnet;
+ return;
+end
+CPD = init_fields;
+
+
+ns = bnet.node_sizes;
+d = ns(self);
+
+if nargin < 5,
+ prior = [];
+ if isempty(mu), mu = randn(d, 1); end
+ if isempty(Sigma), Sigma = eye(d); end
+else
+ if isempty(mu0), mu0 = zeros(d, 1); end
+ if isempty(n0), n0 = 0.1; end
+ if isempty(alpha0), alpha0 = (d-1)/2 + 1; end % Wishart requires 2 alpha > d-1
+ if isempty(beta0), beta0 = eye(d); end
+
+ prior.mu = mu0;
+ prior.n = n0;
+ prior.alpha = alpha0;
+ prior.beta = beta0;
+
+ % set params to their mean
+ mu = prior.mu;
+ Sigma = prior.beta/prior.alpha; % mean of Wishart is E[lambda] = alpha*inv(beta)
+end
+
+CPD.self = self;
+CPD.mu = mu;
+CPD.Sigma = Sigma;
+CPD.prior = prior;
+
+clamped = 0;
+CPD = class(CPD, 'root_gaussian_CPD', generic_CPD(clamped));
+
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.self = [];
+CPD.mu = [];
+CPD.Sigma = [];
+CPD.prior = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/update_params_complete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@root_gaussian_CPD/update_params_complete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function CPD = update_params_complete(CPD, self_ev, pev)
+% UPDATE_PARAMS_COMPLETE Bayesian parameter updating given completely observed data (root_gaussian)
+% CPD = update_params_complete(CPD, self_ev, pev)
+%
+% self_ev{m} is the evidence on this node in case m.
+% pev{i,m} is the evidence on the i'th parent in case m (ignored)
+%
+% We update the hyperparams and set the params to the mean of the posterior.
+
+X = cell2num(self_ev);
+[k N] = size(X); % each column is a case
+
+one = ones(N,1);
+xbar = X*one / N; % = mean(X')'
+S = X*(eye(N) - one*one'/N)*X';
+
+n0 = CPD.prior.n;
+nn = 1/(n0 + N);
+mu0 = CPD.prior.mu;
+CPD.prior.mu = nn*(n0*mu0 + N*xbar);
+CPD.prior.alpha = CPD.prior.alpha + 0.5*N;
+CPD.prior.beta = CPD.prior.beta + 0.5*S + 0.5*nn*N*n0*(mu0-xbar)*(mu0-xbar)';
+CPD.prior.n = CPD.prior.n + N;
+
+% set params to their mean
+CPD.mu = CPD.prior.mu;
+% E[Cov] = E inv(n lambda) = 1/(n (alpha-(k+1)/2)) beta
+CPD.Sigma = CPD.prior.beta /(CPD.prior.n * (CPD.prior.alpha - (k+1)/2));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CPD_to_upot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CPD_to_upot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function pot = CPD_to_upot(CPD, domain)
+% CPD_TO_UPOT Convert a CPD to a utility potential
+% pot = CPD_to_upot(CPD, domain)
+
+sz = CPD.size; % mysize(CPD.CPT);
+pot = upot(domain, sz, CPD.CPT, 0*myones(sz));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/CPD_to_upot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tabular_chance_node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/Old/@tabular_chance_node
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/tabular_chance_node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/@tabular_chance_node/tabular_chance_node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function CPD = tabular_chance_node(sz, CPT)
+% TABULAR_CHANCE_NODE Like tabular_CPD, but simplified
+% CPD = tabular_chance_node(sz, CPT)
+%
+% sz(1:end-1) is the sizes of the parents, sz(end) is the size of this node
+% By default, CPT is a random stochastic matrix.
+
+if nargin==0
+ % This occurs if we are trying to load an object from a file.
+ CPD = init_fields;
+ CPD = class(CPD, 'tabular_chance_node');
+ return;
+elseif isa(sz, 'tabular_chance_node')
+ % This might occur if we are copying an object.
+ CPD = sz;
+ return;
+end
+CPD = init_fields;
+
+if nargin < 2,
+ CPT = mk_stochastic(myones(sz));
+else
+ CPT = myreshape(CPT, sz);
+end
+
+CPD.CPT = CPT;
+CPD.size = sz;
+
+CPD = class(CPD, 'tabular_chance_node');
+
+%%%%%%%%%%%
+
+function CPD = init_fields()
+% This ensures we define the fields in the same order
+% no matter whether we load an object from a file,
+% or create it from scratch. (Matlab requires this.)
+
+CPD.CPT = [];
+CPD.size = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+A D/@linear_gaussian_CPD////
+A D/@root_gaussian_CPD////
+A D/@tabular_chance_node////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/CPDs/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/CPDs/mk_isolated_tabular_CPD.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/mk_isolated_tabular_CPD.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function CPD = mk_isolated_tabular_CPD(fam_sz, args)
+% function CPD = mk_isolated_tabular_CPD(fam_sz, args)
+% function CPD = mk_isolated_tabular_CPD(fam_sz, args)
+% Make a single CPD by creating a mini-bnet containing just this one family.
+% This is necessary because the CPD constructor requires a bnet.
+
+n = length(fam_sz);
+dag = zeros(n,n);
+ps = 1:(n-1);
+if ~isempty(ps)
+ dag(ps,n) = 1;
+end
+bnet = mk_bnet(dag, fam_sz);
+CPD = tabular_CPD(bnet, n, args{:});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+The Bayes Net Toolbox for Matlab was written by Kevin Patrick Murphy et al.
+This version (FullBNT_1.0.6) was last updated on 5/9/2010, by Wei Sun.
+
+BNT has moved to bnt.googlecode.com, in Jan. 2010.
+To download the latest version, and to get documentation, please go to
+ http://code.google.com/p/bnt/
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/copyright.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/copyright.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+Written by Kevin Murphy (murphyk@ai.mit.edu), 1997-2003
+Distributed under the GNU Library GPL - see license.gpl for details.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/dummy/1.1.1.1/Sat Jan 18 22:22:06 2003//
+D/dynamic////
+D/limids////
+D/static////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+/arhmm1.m/1.1.1.1/Thu Nov 14 01:03:34 2002//
+/bat1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/bkff1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/chmm1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cmp_inference_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cmp_learning_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cmp_online_inference.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/dhmm1.m/1.1.1.1/Sun May 4 22:23:18 2003//
+/ehmm1.m/1.1.1.1/Sat Jan 18 22:16:24 2003//
+/fhmm_infer.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/filter_test1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/ghmm1.m/1.1.1.1/Sun May 4 22:23:32 2003//
+/ho1.m/1.1.1.1/Fri Mar 28 17:22:36 2003//
+/jtree_clq_test.m/1.1.1.1/Sat Jan 18 22:16:38 2003//
+/jtree_clq_test2.m/1.1.1.1/Thu Oct 10 23:45:12 2002//
+/kalman1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/kjaerulff1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/loopy_dbn1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mhmm1.m/1.1.1.1/Sun May 4 22:23:40 2003//
+/mildew1.m/1.1.1.1/Thu Jun 20 20:30:24 2002//
+/mk_bat_dbn.m/1.1.1.1/Mon Jun 7 19:07:18 2004//
+/mk_chmm.m/1.1.1.1/Tue May 11 19:23:14 2004//
+/mk_collage_from_clqs.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_fhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_mildew_dbn.m/1.1.1.1/Thu Oct 10 23:14:36 2002//
+/mk_orig_bat_dbn.m/1.1.1.1/Wed Feb 4 23:53:06 2004//
+/mk_orig_water_dbn.m/1.1.1.1/Sat Jan 31 02:57:52 2004//
+/mk_ps_from_clqs.m/1.1.1.1/Wed Oct 9 20:36:56 2002//
+/mk_uffe_dbn.m/1.1.1.1/Thu Oct 10 23:14:54 2002//
+/mk_water_dbn.m/1.1.1.1/Tue May 11 18:45:38 2004//
+/orig_water1.m/1.1.1.1/Mon Nov 22 22:41:42 2004//
+/reveal1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/scg_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/skf_data_assoc_gmux.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/viterbi1.m/1.1.1.1/Tue May 13 14:35:40 2003//
+/water1.m/1.1.1.1/Thu Nov 14 20:07:56 2002//
+/water2.m/1.1.1.1/Thu Nov 14 20:33:42 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+A D/HHMM////
+A D/Old////
+A D/SLAM////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+/abcd_hhmm.m/1.1.1.1/Sat Sep 21 21:37:54 2002//
+/add_hhmm_end_state.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hhmm_jtree_clqs.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_hhmm.m/1.1.1.1/Sat Sep 21 20:58:06 2002//
+/mk_hhmm_topo.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_hhmm_topo_F1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/pretty_print_hhmm_parse.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/remove_hhmm_end_state.m/1.1.1.1/Mon Dec 16 19:16:50 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+A D/Map////
+A D/Mgram////
+A D/Motif////
+A D/Old////
+A D/Square////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/disp_map_hhmm.m/1.1.1.1/Tue Sep 24 22:45:56 2002//
+/learn_map.m/1.1.1.1/Sat Jan 11 18:48:46 2003//
+/mk_map_hhmm.m/1.1.1.1/Tue Sep 24 10:49:52 2002//
+/mk_rnd_map_hhmm.m/1.1.1.1/Tue Sep 24 22:13:48 2002//
+/sample_from_map.m/1.1.1.1/Tue Sep 24 13:02:30 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Map
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/mk_map_hhmm.m/1.1.1.1/Tue Sep 24 07:02:44 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Map/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/mk_map_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/Old/mk_map_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,156 @@
+function bnet = mk_map_hhmm(varargin)
+
+% p is the prob of a successful move (defines the reliability of motors)
+p = 1;
+num_obs_nodes = 1;
+
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'p', p = varargin{i+1};
+ case 'numobs', num_obs_node = varargin{i+1};
+ end
+end
+
+
+q = 1-p;
+
+% assign numbers to the nodes in topological order
+U = 1; A = 2; C = 3; F = 4; O = 5;
+
+% create graph structure
+
+ss = 5; % slice size
+intra = zeros(ss,ss);
+intra(U,F)=1;
+intra(A,[C F O])=1;
+intra(C,[F O])=1;
+
+inter = zeros(ss,ss);
+inter(U,[A C])=1;
+inter(A,[A C])=1;
+inter(F,[A C])=1;
+inter(C,C)=1;
+
+% node sizes
+ns = zeros(1,ss);
+ns(U) = 2; % left/right
+ns(A) = 2;
+ns(C) = 3;
+ns(F) = 2;
+ns(O) = 5; % we will assign each state a unique symbol
+l = 1; r = 2; % left/right
+L = 1; R = 2;
+
+% Make the DBN
+bnet = mk_dbn(intra, inter, ns, 'observed', O);
+eclass = bnet.equiv_class;
+
+
+
+% Define CPDs for slice 1
+% We clamp all of them, i.e., do not try to learn them.
+
+% uniform probs over actions (the input could be chosen from a policy)
+bnet.CPD{eclass(U,1)} = tabular_CPD(bnet, U, 'CPT', mk_stochastic(ones(ns(U),1)), ...
+ 'adjustable', 0);
+
+% uniform probs over starting abstract state
+bnet.CPD{eclass(A,1)} = tabular_CPD(bnet, A, 'CPT', mk_stochastic(ones(ns(A),1)), ...
+ 'adjustable', 0);
+
+% Uniform probs over starting concrete state, modulo the fact
+% that corridor 2 is only of length 2.
+CPT = zeros(ns(A), ns(C)); % CPT(i,j) = P(C starts in j | A=i)
+CPT(1, :) = [1/3 1/3 1/3];
+CPT(2, :) = [1/2 1/2 0];
+bnet.CPD{eclass(C,1)} = tabular_CPD(bnet, C, 'CPT', CPT, 'adjustable', 0);
+
+% Termination probs
+CPT = zeros(ns(U), ns(A), ns(C), ns(F));
+CPT(r,1,1,:) = [1 0];
+CPT(r,1,2,:) = [1 0];
+CPT(r,1,3,:) = [q p];
+CPT(r,2,1,:) = [1 0];
+CPT(r,2,2,:) = [q p];
+CPT(l,1,1,:) = [q p];
+CPT(l,1,2,:) = [1 0];
+CPT(l,1,3,:) = [1 0];
+CPT(l,2,1,:) = [q p];
+CPT(l,2,2,:) = [1 0];
+
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', CPT);
+
+
+% Assign each state a unique observation
+CPT = zeros(ns(A), ns(C), ns(O));
+CPT(1,1,1)=1;
+CPT(1,2,2)=1;
+CPT(1,3,3)=1;
+CPT(2,1,4)=1;
+CPT(2,2,5)=1;
+%CPT(2,3,:) undefined
+
+bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', CPT);
+
+
+% Define the CPDs for slice 2
+
+% Abstract
+
+% Since the top level never resets, the starting distribution is irrelevant:
+% A2 will be determined by sampling from transmat(A1,:).
+% But the code requires we specify it anyway; we make it all 0s, a dummy value.
+startprob = zeros(ns(U), ns(A));
+
+transmat = zeros(ns(U), ns(A), ns(A));
+transmat(R,1,:) = [q p];
+transmat(R,2,:) = [0 1];
+transmat(L,1,:) = [1 0];
+transmat(L,2,:) = [p q];
+
+% Qps are the parents we condition the parameters on, in this case just
+% the past action.
+bnet.CPD{eclass(A,2)} = hhmm2Q_CPD(bnet, A+ss, 'Fbelow', F, ...
+ 'startprob', startprob, 'transprob', transmat);
+
+
+
+% Concrete
+
+transmat = zeros(ns(C), ns(U), ns(A), ns(C));
+transmat(1,r,1,:) = [q p 0.0];
+transmat(2,r,1,:) = [0.0 q p];
+transmat(3,r,1,:) = [0.0 0.0 1.0];
+transmat(1,r,2,:) = [q p 0.0];
+transmat(2,r,2,:) = [0.0 1.0 0.0];
+%
+transmat(1,l,1,:) = [1.0 0.0 0.0];
+transmat(2,l,1,:) = [p q 0.0];
+transmat(3,l,1,:) = [0.0 p q];
+transmat(1,l,2,:) = [1.0 0.0 0.0];
+transmat(2,l,2,:) = [p q 0.0];
+
+% Add a new dimension for A(t-1), by copying old vals,
+% so the matrix is the same size as startprob
+
+
+transmat = reshape(transmat, [ns(C) ns(U) ns(A) 1 ns(C)]);
+transmat = repmat(transmat, [1 1 1 ns(A) 1]);
+
+% startprob(C(t-1), U(t-1), A(t-1), A(t), C(t))
+startprob = zeros(ns(C), ns(U), ns(A), ns(A), ns(C));
+startprob(1,L,1,1,:) = [1.0 0.0 0.0];
+startprob(3,R,1,2,:) = [1.0 0.0 0.0];
+startprob(3,R,1,1,:) = [0.0 0.0 1.0];
+%
+startprob(1,L,2,1,:) = [0.0 0.0 010];
+startprob(2,L,2,1,:) = [1.0 0.0 0.0];
+startprob(2,R,2,2,:) = [0.0 1.0 0.0];
+
+% want transmat(U,A,C,At,Ct), ie. in topo order
+transmat = permute(transmat, [2 3 1 4 5]);
+startprob = permute(startprob, [2 3 1 4 5]);
+bnet.CPD{eclass(C,2)} = hhmm2Q_CPD(bnet, C+ss, 'Fself', F, ...
+ 'startprob', startprob, 'transprob', transmat);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/disp_map_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/disp_map_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function disp_map_hhmm(bnet)
+
+eclass = bnet.equiv_class;
+U = 1; A = 2; C = 3; F = 4;
+
+S = struct(bnet.CPD{eclass(A,2)});
+disp('abstract trans')
+dispcpt(S.transprob)
+
+S = struct(bnet.CPD{eclass(C,2)});
+disp('concrete trans for go left') % UAC AC
+dispcpt(squeeze(S.transprob(1,:,:,:,:)))
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/learn_map.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/learn_map.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+obs_model = 'unique'; % each cell has a unique label (essentially fully observable)
+%obs_model = 'four'; % each cell generates 4 observations, NESW
+
+% Generate the true network, and a randomization of it
+realnet = mk_map_hhmm('p', 0.9, 'obs_model', obs_model);
+rndnet = mk_rnd_map_hhmm('obs_model', obs_model);
+eclass = realnet.equiv_class;
+U = 1; A = 2; C = 3; F = 4; onodes = 5;
+
+ss = realnet.nnodes_per_slice;
+T = 100;
+evidence = sample_dbn(realnet, 'length', T);
+ev = cell(ss,T);
+ev(onodes,:) = evidence(onodes,:);
+
+infeng = jtree_dbn_inf_engine(rndnet);
+
+if 0
+% suppose we do not observe the final finish node, but only know
+% it is more likely to be on that off
+ev2 = ev;
+infeng = enter_evidence(infeng, ev2, 'soft_evidence_nodes', [F T], 'soft_evidence', {[0.3 0.7]'});
+end
+
+
+learnednet = learn_params_dbn_em(infeng, {evidence}, 'max_iter', 5);
+
+disp('real model')
+disp_map_hhmm(realnet)
+
+disp('learned model')
+disp_map_hhmm(learnednet)
+
+disp('rnd model')
+disp_map_hhmm(rndnet)
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/mk_map_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/mk_map_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+function bnet = mk_map_hhmm(varargin)
+
+% p is the prob of a successful move (defines the reliability of motors)
+p = 1;
+obs_model = 'unique';
+
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'p', p = varargin{i+1};
+ case 'obs_model', obs_model = varargin{i+1};
+ end
+end
+
+
+q = 1-p;
+unique_obs = strcmp(obs_model, 'unique');
+
+% assign numbers to the nodes in topological order
+U = 1; A = 2; C = 3; F = 4;
+if unique_obs
+ onodes = 5;
+else
+ N = 5; E = 6; S = 7; W = 8; % north, east, south, west
+ onodes = [N E S W];
+end
+
+% create graph structure
+
+ss = 4 + length(onodes); % slice size
+intra = zeros(ss,ss);
+intra(U,F)=1;
+intra(A,[C F onodes])=1;
+intra(C,[F onodes])=1;
+
+inter = zeros(ss,ss);
+inter(U,[A C])=1;
+inter(A,[A C])=1;
+inter(F,[A C])=1;
+inter(C,C)=1;
+
+% node sizes
+ns = zeros(1,ss);
+ns(U) = 2; % left/right
+ns(A) = 2;
+ns(C) = 3;
+ns(F) = 2;
+if unique_obs
+ ns(onodes) = 5; % we will assign each state a unique symbol
+else
+ ns(onodes) = 2;
+end
+l = 1; r = 2; % left/right
+L = 1; R = 2;
+
+% Make the DBN
+bnet = mk_dbn(intra, inter, ns, 'observed', onodes);
+eclass = bnet.equiv_class;
+
+
+
+% Define CPDs for slice 1
+% We clamp all the CPDs that are not tied,
+% since we cannot learn them from a single sequence.
+
+% uniform probs over actions (the input could be chosen from a policy)
+bnet.CPD{eclass(U,1)} = tabular_CPD(bnet, U, 'CPT', mk_stochastic(ones(ns(U),1)), ...
+ 'adjustable', 0);
+
+% uniform probs over starting abstract state
+bnet.CPD{eclass(A,1)} = tabular_CPD(bnet, A, 'CPT', mk_stochastic(ones(ns(A),1)), ...
+ 'adjustable', 0);
+
+% Uniform probs over starting concrete state, modulo the fact
+% that corridor 2 is only of length 2.
+CPT = zeros(ns(A), ns(C)); % CPT(i,j) = P(C starts in j | A=i)
+CPT(1, :) = [1/3 1/3 1/3];
+CPT(2, :) = [1/2 1/2 0];
+bnet.CPD{eclass(C,1)} = tabular_CPD(bnet, C, 'CPT', CPT, 'adjustable', 0);
+
+% Termination probs
+CPT = zeros(ns(U), ns(A), ns(C), ns(F));
+CPT(r,1,1,:) = [1 0];
+CPT(r,1,2,:) = [1 0];
+CPT(r,1,3,:) = [q p];
+CPT(r,2,1,:) = [1 0];
+CPT(r,2,2,:) = [q p];
+CPT(l,1,1,:) = [q p];
+CPT(l,1,2,:) = [1 0];
+CPT(l,1,3,:) = [1 0];
+CPT(l,2,1,:) = [q p];
+CPT(l,2,2,:) = [1 0];
+
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', CPT);
+
+
+% Observation model
+if unique_obs
+ CPT = zeros(ns(A), ns(C), 5);
+ CPT(1,1,1)=1; % Theo state 4
+ CPT(1,2,2)=1; % Theo state 5
+ CPT(1,3,3)=1; % Theo state 6
+ CPT(2,1,4)=1; % Theo state 9
+ CPT(2,2,5)=1; % Theo state 10
+ %CPT(2,3,:) undefined
+ O = onodes(1);
+ bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', CPT);
+else
+ % north/east/south/west can see wall (1) or opening (2)
+ CPT = zeros(ns(A), ns(C), 2);
+ CPT(:,:,1) = q;
+ CPT(:,:,2) = p;
+ bnet.CPD{eclass(W,1)} = tabular_CPD(bnet, W, 'CPT', CPT);
+ bnet.CPD{eclass(E,1)} = tabular_CPD(bnet, E, 'CPT', CPT);
+ CPT = zeros(ns(A), ns(C), 2);
+ CPT(:,:,1) = p;
+ CPT(:,:,2) = q;
+ bnet.CPD{eclass(S,1)} = tabular_CPD(bnet, S, 'CPT', CPT);
+ bnet.CPD{eclass(N,1)} = tabular_CPD(bnet, N, 'CPT', CPT);
+end
+
+% Define the CPDs for slice 2
+
+% Abstract
+
+% Since the top level never resets, the starting distribution is irrelevant:
+% A2 will be determined by sampling from transmat(A1,:).
+% But the code requires we specify it anyway; we make it all 0s, a dummy value.
+startprob = zeros(ns(U), ns(A));
+
+transmat = zeros(ns(U), ns(A), ns(A));
+transmat(R,1,:) = [q p];
+transmat(R,2,:) = [0 1];
+transmat(L,1,:) = [1 0];
+transmat(L,2,:) = [p q];
+
+% Qps are the parents we condition the parameters on, in this case just
+% the past action.
+bnet.CPD{eclass(A,2)} = hhmm2Q_CPD(bnet, A+ss, 'Fbelow', F, ...
+ 'startprob', startprob, 'transprob', transmat);
+
+
+
+% Concrete
+
+transmat = zeros(ns(C), ns(U), ns(A), ns(C));
+transmat(1,r,1,:) = [q p 0.0];
+transmat(2,r,1,:) = [0.0 q p];
+transmat(3,r,1,:) = [0.0 0.0 1.0];
+transmat(1,r,2,:) = [q p 0.0];
+transmat(2,r,2,:) = [0.0 1.0 0.0];
+%
+transmat(1,l,1,:) = [1.0 0.0 0.0];
+transmat(2,l,1,:) = [p q 0.0];
+transmat(3,l,1,:) = [0.0 p q];
+transmat(1,l,2,:) = [1.0 0.0 0.0];
+transmat(2,l,2,:) = [p q 0.0];
+
+% Add a new dimension for A(t-1), by copying old vals,
+% so the matrix is the same size as startprob
+
+
+transmat = reshape(transmat, [ns(C) ns(U) ns(A) 1 ns(C)]);
+transmat = repmat(transmat, [1 1 1 ns(A) 1]);
+
+% startprob(C(t-1), U(t-1), A(t-1), A(t), C(t))
+startprob = zeros(ns(C), ns(U), ns(A), ns(A), ns(C));
+startprob(1,L,1,1,:) = [1.0 0.0 0.0];
+startprob(3,R,1,2,:) = [1.0 0.0 0.0];
+startprob(3,R,1,1,:) = [0.0 0.0 1.0];
+%
+startprob(1,L,2,1,:) = [0.0 0.0 1.0];
+startprob(2,L,2,1,:) = [1.0 0.0 0.0];
+startprob(2,R,2,2,:) = [0.0 1.0 0.0];
+
+% want transmat(U,A,C,At,Ct), ie. in topo order
+transmat = permute(transmat, [2 3 1 4 5]);
+startprob = permute(startprob, [2 3 1 4 5]);
+bnet.CPD{eclass(C,2)} = hhmm2Q_CPD(bnet, C+ss, 'Fself', F, ...
+ 'startprob', startprob, 'transprob', transmat);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/mk_rnd_map_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/mk_rnd_map_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,73 @@
+function bnet = mk_rnd_map_hhmm(varargin)
+
+% We copy the deterministic structure of the real HHMM,
+% but randomize the probabilities of the adjustable CPDs.
+% The key trick is that 0s in the real HHMM remain 0
+% even when multiplied by a random number.
+
+obs_model = 'unique';
+
+for i=1:2:length(varargin)
+ switch varargin{i},
+ case 'obs_model', obs_model = varargin{i+1};
+ end
+end
+
+
+unique_obs = strcmp(obs_model, 'unique');
+
+psuccess = 0.9;
+% must be less than 1, so that pfail > 0
+% otherwise we copy too many 0s
+bnet = mk_map_hhmm('p', psuccess, 'obs_model', obs_model);
+ns = bnet.node_sizes;
+ss = bnet.nnodes_per_slice;
+
+U = 1; A = 2; C = 3; F = 4;
+%unique_obs = (bnet.nnodes_per_slice == 5);
+if unique_obs
+ onodes = 5;
+else
+ north = 5; east = 6; south = 7; west = 8;
+ onodes = [north east south west];
+end
+
+eclass = bnet.equiv_class;
+S=struct(bnet.CPD{eclass(F,1)});
+CPT = mk_stochastic(rand(size(S.CPT)) .* S.CPT);
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', CPT);
+
+
+% Observation model
+if unique_obs
+ CPT = zeros(ns(A), ns(C), 5);
+ CPT(1,1,1)=1; % Theo state 4
+ CPT(1,2,2)=1; % Theo state 5
+ CPT(1,3,3)=1; % Theo state 6
+ CPT(2,1,4)=1; % Theo state 9
+ CPT(2,2,5)=1; % Theo state 10
+ %CPT(2,3,:) undefined
+ O = onodes(1);
+ bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', CPT);
+else
+ for i=[north east south west]
+ CPT = mk_stochastic(rand(ns(A), ns(C), 2));
+ bnet.CPD{eclass(i,1)} = tabular_CPD(bnet, i, 'CPT', CPT);
+ end
+end
+
+% Define the CPDs for slice 2
+
+startprob = zeros(ns(U), ns(A));
+S = struct(bnet.CPD{eclass(A,2)});
+transprob = mk_stochastic(rand(size(S.transprob)) .* S.transprob);
+bnet.CPD{eclass(A,2)} = hhmm2Q_CPD(bnet, A+ss, 'Fbelow', F, ...
+ 'startprob', startprob, 'transprob', transprob);
+
+S = struct(bnet.CPD{eclass(C,2)});
+transprob = mk_stochastic(rand(size(S.transprob)) .* S.transprob);
+startprob = mk_stochastic(rand(size(S.startprob)) .* S.startprob);
+bnet.CPD{eclass(C,2)} = hhmm2Q_CPD(bnet, C+ss, 'Fself', F, ...
+ 'startprob', startprob, 'transprob', transprob);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/sample_from_map.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Map/sample_from_map.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+if 0
+% Generate some sample paths
+
+bnet = mk_map_hhmm('p', 1);
+% assign numbers to the nodes in topological order
+U = 1; A = 2; C = 3; F = 4; O = 5;
+
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+% control policy = sweep right then left
+T = 10;
+ss = 5;
+ev = cell(ss, T);
+ev(U,:) = num2cell([R*ones(1,5) L*ones(1,5)]);
+
+% fix initial conditions to be in left most state
+ev{A,1} = 1;
+ev{C,1} = 1;
+evidence = sample_dbn(bnet, 'length', T, 'evidence', ev)
+
+
+% Now do same but with noisy actuators
+
+bnet = mk_map_hhmm('p', 0.8);
+evidence = sample_dbn(bnet, 'length', T, 'evidence', ev)
+
+end
+
+% Now do same but with 4 observations per slice
+
+bnet = mk_map_hhmm('p', 0.8, 'obs_model', 'four');
+ss = bnet.nnodes_per_slice;
+
+ev = cell(ss, T);
+ev(U,:) = num2cell([R*ones(1,5) L*ones(1,5)]);
+ev{A,1} = 1;
+ev{C,1} = 1;
+evidence = sample_dbn(bnet, 'length', T, 'evidence', ev)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/letter2num.m/1.1.1.1/Fri Nov 22 23:10:20 2002//
+/mgram1.m/1.1.1.1/Fri Nov 22 23:59:00 2002//
+/mgram2.m/1.1.1.1/Tue Nov 26 22:04:24 2002//
+/mgram3.m/1.1.1.1/Tue Nov 26 22:14:10 2002//
+/num2letter.m/1.1.1.1/Fri Nov 22 23:07:40 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Mgram
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/mgram2.m/1.1.1.1/Sat Nov 23 00:44:34 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Mgram/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/mgram2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/Old/mgram2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,191 @@
+% like mgram1, except we use a durational HMM instead of an HHMM2
+
+past = 0;
+
+words = {'the', 't', 'h', 'e'};
+data = 'the';
+nwords = length(words);
+word_len = zeros(1, nwords);
+word_prob = normalise(ones(1,nwords));
+word_logprob = log(word_prob);
+for wi=1:nwords
+ word_len(wi)=length(words{wi});
+end
+D = max(word_len);
+
+
+alphasize = 26*2;
+data = letter2num(data);
+T = length(data);
+
+% node numbers
+W = 1; % top level state = word id
+L = 2; % bottom level state = letter position within word
+F = 3;
+O = 4;
+
+ss = 4;
+intra = zeros(ss,ss);
+intra(W,[F L O])=1;
+intra(L,[O F])=1;
+
+inter = zeros(ss,ss);
+inter(W,W)=1;
+inter(L,L)=1;
+inter(F,[W L O])=1;
+
+% node sizes
+ns = zeros(1,ss);
+ns(W) = nwords;
+ns(L) = D;
+ns(F) = 2;
+ns(O) = alphasize;
+ns2 = [ns ns];
+
+% Make the DBN
+bnet = mk_dbn(intra, inter, ns, 'observed', O);
+eclass = bnet.equiv_class;
+
+% uniform start distrib over words, uniform trans mat
+Wstart = normalise(ones(1,nwords));
+Wtrans = mk_stochastic(ones(nwords,nwords));
+
+% always start in state d = length(word) for each bottom level HMM
+Lstart = zeros(nwords, D);
+for i=1:nwords
+ l = length(words{i});
+ Lstart(i,l)=1;
+end
+
+% make downcounters
+RLtrans = mk_rightleft_transmat(D, 0); % 0 self loop prob
+Ltrans = repmat(RLtrans, [1 1 nwords]);
+
+% Finish when downcounter = 1
+Fprob = zeros(nwords, D, 2);
+Fprob(:,1,2)=1;
+Fprob(:,2:end,1)=1;
+
+
+% Define CPDs for slice
+bnet.CPD{eclass(W,1)} = tabular_CPD(bnet, W, 'CPT', Wstart);
+bnet.CPD{eclass(L,1)} = tabular_CPD(bnet, L, 'CPT', Lstart);
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', Fprob);
+
+
+% Define CPDs for slice 2
+bnet.CPD{eclass(W,2)} = hhmmQ_CPD(bnet, W+ss, 'Fbelow', F, 'startprob', Wstart, 'transprob', Wtrans);
+bnet.CPD{eclass(L,2)} = hhmmQ_CPD(bnet, L+ss, 'Fself', F, 'Qps', W+ss, 'startprob', Lstart, 'transprob', Ltrans);
+
+
+if 0
+% To test it is generating correctly, we create an artificial
+% observation process that capitalizes at the start of a new segment
+% Oprob(Ft-1,Qt,Dt,Yt)
+Oprob = zeros(2,nwords,D,alphasize);
+Oprob(1,1,3,letter2num('t'),1)=1;
+Oprob(1,1,2,letter2num('h'),1)=1;
+Oprob(1,1,1,letter2num('e'),1)=1;
+Oprob(2,1,3,letter2num('T'),1)=1;
+Oprob(2,1,2,letter2num('H'),1)=1;
+Oprob(2,1,1,letter2num('E'),1)=1;
+Oprob(1,2,1,letter2num('a'),1)=1;
+Oprob(2,2,1,letter2num('A'),1)=1;
+Oprob(1,3,1,letter2num('b'),1)=1;
+Oprob(2,3,1,letter2num('B'),1)=1;
+Oprob(1,4,1,letter2num('c'),1)=1;
+Oprob(2,4,1,letter2num('C'),1)=1;
+
+% Oprob1(Qt,Dt,Yt)
+Oprob1 = zeros(nwords,D,alphasize);
+Oprob1(1,3,letter2num('t'),1)=1;
+Oprob1(1,2,letter2num('h'),1)=1;
+Oprob1(1,1,letter2num('e'),1)=1;
+Oprob1(2,1,letter2num('a'),1)=1;
+Oprob1(3,1,letter2num('b'),1)=1;
+Oprob1(4,1,letter2num('c'),1)=1;
+
+bnet.CPD{eclass(O,2)} = tabular_CPD(bnet, O+ss, 'CPT', Oprob);
+bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', Oprob1);
+
+evidence = cell(ss,T);
+%evidence{W,1}=1;
+sample = cell2num(sample_dbn(bnet, 'length', T, 'evidence', evidence));
+str = num2letter(sample(4,:))
+end
+
+
+
+
+[log_obslik, obslik, match] = mk_mgram_obslik(lower(data), words, word_len, word_prob);
+% obslik(j,t,d)
+softCPDpot = cell(ss,T);
+ens = ns;
+ens(O)=1;
+ens2 = [ens ens];
+for t=2:T
+ dom = [F W+ss L+ss O+ss];
+ % tab(Ft-1, Q2, Dt)
+ tab = ones(2, nwords, D);
+ if past
+ tab(1,:,:)=1; % if haven't finished previous word, likelihood is 1
+ tab(2,:,:) = squeeze(obslik(:,t,:)); % otherwise likelihood of this segment
+ else
+ for d=1:max(1,min(D,T+1-t))
+ tab(2,:,d) = squeeze(obslik(:,t+d-1,d));
+ end
+ end
+ softCPDpot{O,t} = dpot(dom, ens2(dom), tab);
+end
+t = 1;
+dom = [W L O];
+% tab(Q2, Dt)
+tab = ones(nwords, D);
+if past
+ tab = squeeze(obslik(:,t,:));
+else
+ for d=1:min(D,T-t)
+ tab(:,d) = squeeze(obslik(:,t+d-1,d));
+ end
+end
+softCPDpot{O,t} = dpot(dom, ens(dom), tab);
+
+
+%bnet.observed = [];
+% uninformative observations
+%bnet.CPD{eclass(O,2)} = tabular_CPD(bnet, O+ss, 'CPT', mk_stochastic(ones(2,nwords,D,alphasize)));
+%bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', mk_stochastic(ones(nwords,D,alphasize)));
+
+engine = jtree_dbn_inf_engine(bnet);
+evidence = cell(ss,T);
+% we add dummy data to O to force its effective size to be 1.
+% The actual values have already been incorporated into softCPDpot
+evidence(O,:) = num2cell(ones(1,T));
+[engine, ll_dbn] = enter_evidence(engine, evidence, 'softCPDpot', softCPDpot);
+
+
+%evidence(F,:) = num2cell(2*ones(1,T));
+%[engine, ll_dbn] = enter_evidence(engine, evidence);
+
+
+gamma = zeros(nwords, T);
+for t=1:T
+ m = marginal_nodes(engine, [W F], t);
+ gamma(:,t) = m.T(:,2);
+end
+
+gamma
+
+xidbn = zeros(nwords, nwords);
+for t=1:T-1
+ m = marginal_nodes(engine, [W F W+ss], t);
+ xidbn = xidbn + squeeze(m.T(:,2,:));
+end
+
+% thee
+% xidbn(1,4) = 0.9412 the->e
+% (2,3)=0.0588 t->h
+% (3,4)=0.0588 h-e
+% (4,4)=0.0588 e-e
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/letter2num.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/letter2num.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function n = letter2num(l)
+
+% map a-z to 1:26 and A-Z to 27:52
+punct_code = [32:47 58:64 91:96 123:126];
+digits_code = 48:57;
+upper_code = 65:90;
+lower_code = 97:122;
+
+c = double(l);
+n = c-96;
+ndx = find(n <= 0); % upper case
+n(ndx) = c(ndx) - 64 + 26;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/mgram1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/mgram1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,116 @@
+% a multigram is a degenerate 2HHMM where the bottom level HMMs emit deterministic strings
+% and the the top level abstract states are independent of each other
+% cf. HSMM/test_mgram2
+
+words = {'the', 't', 'h', 'e'};
+data = 'the';
+nwords = length(words);
+word_len = zeros(1, nwords);
+word_prob = normalise(ones(1,nwords));
+word_logprob = log(word_prob);
+for wi=1:nwords
+ word_len(wi)=length(words{wi});
+end
+D = max(word_len);
+
+alphasize = 26;
+data = letter2num(data);
+T = length(data);
+
+% node numbers
+W = 1; % top level state = word id
+L = 2; % bottom level state = letter position within word
+F = 3;
+O = 4;
+
+ss = 4;
+intra = zeros(ss,ss);
+intra(W,[F L O])=1;
+intra(L,[O F])=1;
+
+inter = zeros(ss,ss);
+inter(W,W)=1;
+inter(L,L)=1;
+inter(F,[W L])=1;
+
+% node sizes
+ns = zeros(1,ss);
+ns(W) = nwords;
+ns(L) = D;
+ns(F) = 2;
+ns(O) = alphasize;
+
+
+% Make the DBN
+bnet = mk_dbn(intra, inter, ns, 'observed', O);
+eclass = bnet.equiv_class;
+
+
+
+% uniform start distrib over words, uniform trans mat
+Wstart = normalise(ones(1,nwords));
+Wtrans = mk_stochastic(ones(nwords,nwords));
+
+% always start in state 1 for each bottom level HMM
+delta1_start = zeros(1, D);
+delta1_start(1) = 1;
+Lstart = repmat(delta1_start, nwords, 1);
+LRtrans = mk_leftright_transmat(D, 0); % 0 self loop prob
+Ltrans = repmat(LRtrans, [1 1 nwords]);
+
+% Finish in the last letter of each word
+Fprob = zeros(nwords, D, 2);
+Fprob(:,:,1)=1;
+for i=1:nwords
+ Fprob(i,length(words{i}),2)=1;
+ Fprob(i,length(words{i}),1)=0;
+end
+
+% Each state uniquely emits a letter
+Oprob = zeros(nwords, D, alphasize);
+for i=1:nwords
+ for l=1:length(words{i})
+ a = double(words{i}(l))-96;
+ Oprob(i,l,a)=1;
+ end
+end
+
+
+% Define CPDs for slice
+bnet.CPD{eclass(W,1)} = tabular_CPD(bnet, W, 'CPT', Wstart);
+bnet.CPD{eclass(L,1)} = tabular_CPD(bnet, L, 'CPT', Lstart);
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', Fprob);
+bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', Oprob);
+
+% Define CPDs for slice 2
+bnet.CPD{eclass(W,2)} = hhmmQ_CPD(bnet, W+ss, 'Fbelow', F, 'startprob', Wstart, 'transprob', Wtrans);
+bnet.CPD{eclass(L,2)} = hhmmQ_CPD(bnet, L+ss, 'Fself', F, 'Qps', W+ss, 'startprob', Lstart, 'transprob', Ltrans);
+
+evidence = cell(ss,T);
+evidence{W,1}=1;
+sample = cell2num(sample_dbn(bnet, 'length', T, 'evidence', evidence));
+str = lower(sample(4,:))
+
+engine = jtree_dbn_inf_engine(bnet);
+evidence = cell(ss,T);
+evidence(O,:) = num2cell(data);
+[engine, ll_dbn] = enter_evidence(engine, evidence);
+
+gamma = zeros(nwords, T);
+for t=1:T
+ m = marginal_nodes(engine, [W F], t);
+ gamma(:,t) = m.T(:,2);
+end
+gamma
+
+xidbn = zeros(nwords, nwords);
+for t=1:T-1
+ m = marginal_nodes(engine, [W F W+ss], t);
+ xidbn = xidbn + squeeze(m.T(:,2,:));
+end
+
+% thee
+% xidbn(1,4) = 0.9412 the->e
+% (2,3)=0.0588 t->h
+% (3,4)=0.0588 h-e
+% (4,4)=0.0588 e-e
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/mgram2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/mgram2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,200 @@
+% Like a durational HMM, except we use soft evidence on the observed nodes.
+% Should give the same results as HSMM/test_mgram2.
+
+past = 1;
+% If past=1, P(Yt|Qt=j,Dt=d) = P(y_{t-d+1:t}|j)
+% If past=0, P(Yt|Qt=j,Dt=d) = P(y_{t:t+d-1}|j) - future evidence
+
+words = {'the', 't', 'h', 'e'};
+data = 'the';
+nwords = length(words);
+word_len = zeros(1, nwords);
+word_prob = normalise(ones(1,nwords));
+word_logprob = log(word_prob);
+for wi=1:nwords
+ word_len(wi)=length(words{wi});
+end
+D = max(word_len);
+
+
+alphasize = 26*2;
+data = letter2num(data);
+T = length(data);
+
+% node numbers
+W = 1; % top level state = word id
+L = 2; % bottom level state = letter position within word
+F = 3;
+O = 4;
+
+ss = 4;
+intra = zeros(ss,ss);
+intra(W,[F L O])=1;
+intra(L,[O F])=1;
+
+inter = zeros(ss,ss);
+inter(W,W)=1;
+inter(L,L)=1;
+inter(F,[W L O])=1;
+
+% node sizes
+ns = zeros(1,ss);
+ns(W) = nwords;
+ns(L) = D;
+ns(F) = 2;
+ns(O) = alphasize;
+ns2 = [ns ns];
+
+% Make the DBN
+bnet = mk_dbn(intra, inter, ns, 'observed', O);
+eclass = bnet.equiv_class;
+
+% uniform start distrib over words, uniform trans mat
+Wstart = normalise(ones(1,nwords));
+Wtrans = mk_stochastic(ones(nwords,nwords));
+%Wtrans = ones(nwords,nwords);
+
+% always start in state d = length(word) for each bottom level HMM
+Lstart = zeros(nwords, D);
+for i=1:nwords
+ l = length(words{i});
+ Lstart(i,l)=1;
+end
+
+% make downcounters
+RLtrans = mk_rightleft_transmat(D, 0); % 0 self loop prob
+Ltrans = repmat(RLtrans, [1 1 nwords]);
+
+% Finish when downcounter = 1
+Fprob = zeros(nwords, D, 2);
+Fprob(:,1,2)=1;
+Fprob(:,2:end,1)=1;
+
+
+% Define CPDs for slice 1
+bnet.CPD{eclass(W,1)} = tabular_CPD(bnet, W, 'CPT', Wstart);
+bnet.CPD{eclass(L,1)} = tabular_CPD(bnet, L, 'CPT', Lstart);
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', Fprob);
+
+
+% Define CPDs for slice 2
+bnet.CPD{eclass(W,2)} = hhmmQ_CPD(bnet, W+ss, 'Fbelow', F, 'startprob', Wstart, 'transprob', Wtrans);
+bnet.CPD{eclass(L,2)} = hhmmQ_CPD(bnet, L+ss, 'Fself', F, 'Qps', W+ss, 'startprob', Lstart, 'transprob', Ltrans);
+
+
+if 0
+% To test it is generating correctly, we create an artificial
+% observation process that capitalizes at the start of a new segment
+% Oprob(Ft-1,Qt,Dt,Yt)
+Oprob = zeros(2,nwords,D,alphasize);
+Oprob(1,1,3,letter2num('t'),1)=1;
+Oprob(1,1,2,letter2num('h'),1)=1;
+Oprob(1,1,1,letter2num('e'),1)=1;
+Oprob(2,1,3,letter2num('T'),1)=1;
+Oprob(2,1,2,letter2num('H'),1)=1;
+Oprob(2,1,1,letter2num('E'),1)=1;
+Oprob(1,2,1,letter2num('a'),1)=1;
+Oprob(2,2,1,letter2num('A'),1)=1;
+Oprob(1,3,1,letter2num('b'),1)=1;
+Oprob(2,3,1,letter2num('B'),1)=1;
+Oprob(1,4,1,letter2num('c'),1)=1;
+Oprob(2,4,1,letter2num('C'),1)=1;
+
+% Oprob1(Qt,Dt,Yt)
+Oprob1 = zeros(nwords,D,alphasize);
+Oprob1(1,3,letter2num('t'),1)=1;
+Oprob1(1,2,letter2num('h'),1)=1;
+Oprob1(1,1,letter2num('e'),1)=1;
+Oprob1(2,1,letter2num('a'),1)=1;
+Oprob1(3,1,letter2num('b'),1)=1;
+Oprob1(4,1,letter2num('c'),1)=1;
+
+bnet.CPD{eclass(O,2)} = tabular_CPD(bnet, O+ss, 'CPT', Oprob);
+bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', Oprob1);
+
+evidence = cell(ss,T);
+%evidence{W,1}=1;
+sample = cell2num(sample_dbn(bnet, 'length', T, 'evidence', evidence));
+str = num2letter(sample(4,:))
+end
+
+
+if 1
+
+[log_obslik, obslik, match] = mk_mgram_obslik(lower(data), words, word_len, word_prob);
+% obslik(j,t,d)
+softCPDpot = cell(ss,T);
+ens = ns;
+ens(O)=1;
+ens2 = [ens ens];
+for t=2:T
+ dom = [F W+ss L+ss O+ss];
+ % tab(Ft-1, Q2, Dt)
+ tab = ones(2, nwords, D);
+ if past
+ tab(1,:,:)=1; % if haven't finished previous word, likelihood is 1
+ %tab(2,:,:) = squeeze(obslik(:,t,:)); % otherwise likelihood of this segment
+ for d=1:min(t,D)
+ tab(2,:,d) = squeeze(obslik(:,t,d));
+ end
+ else
+ for d=1:max(1,min(D,T+1-t))
+ tab(2,:,d) = squeeze(obslik(:,t+d-1,d));
+ end
+ end
+ softCPDpot{O,t} = dpot(dom, ens2(dom), tab);
+end
+t = 1;
+dom = [W L O];
+% tab(Q2, Dt)
+tab = ones(nwords, D);
+if past
+ %tab = squeeze(obslik(:,t,:));
+ tab(:,1) = squeeze(obslik(:,t,1));
+else
+ for d=1:min(D,T-t)
+ tab(:,d) = squeeze(obslik(:,t+d-1,d));
+ end
+end
+softCPDpot{O,t} = dpot(dom, ens(dom), tab);
+
+
+%bnet.observed = [];
+% uninformative observations
+%bnet.CPD{eclass(O,2)} = tabular_CPD(bnet, O+ss, 'CPT', mk_stochastic(ones(2,nwords,D,alphasize)));
+%bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', mk_stochastic(ones(nwords,D,alphasize)));
+
+engine = jtree_dbn_inf_engine(bnet);
+evidence = cell(ss,T);
+% we add dummy data to O to force its effective size to be 1.
+% The actual values have already been incorporated into softCPDpot
+evidence(O,:) = num2cell(ones(1,T));
+[engine, ll_dbn] = enter_evidence(engine, evidence, 'softCPDpot', softCPDpot);
+
+
+%evidence(F,:) = num2cell(2*ones(1,T));
+%[engine, ll_dbn] = enter_evidence(engine, evidence);
+
+
+gamma = zeros(nwords, T);
+for t=1:T
+ m = marginal_nodes(engine, [W F], t);
+ gamma(:,t) = m.T(:,2);
+end
+
+gamma
+
+xidbn = zeros(nwords, nwords);
+for t=1:T-1
+ m = marginal_nodes(engine, [W F W+ss], t);
+ xidbn = xidbn + squeeze(m.T(:,2,:));
+end
+
+% thee
+% xidbn(1,4) = 0.9412 the->e
+% (2,3)=0.0588 t->h
+% (3,4)=0.0588 h-e
+% (4,4)=0.0588 e-e
+
+
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/mgram3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/mgram3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,235 @@
+% like mgram2, except we unroll the DBN so we can use smaller
+% state spaces for the early duration nodes:
+% the state spaces are D1 in {1}, D2 in {1,2}
+
+past = 1;
+
+words = {'the', 't', 'h', 'e'};
+data = 'the';
+nwords = length(words);
+word_len = zeros(1, nwords);
+word_prob = normalise(ones(1,nwords));
+word_logprob = log(word_prob);
+for wi=1:nwords
+ word_len(wi)=length(words{wi});
+end
+D = max(word_len);
+
+
+alphasize = 26*2;
+data = letter2num(data);
+T = length(data);
+
+% node numbers
+W = 1; % top level state = word id
+L = 2; % bottom level state = letter position within word
+F = 3;
+O = 4;
+
+ss = 4;
+intra = zeros(ss,ss);
+intra(W,[F L O])=1;
+intra(L,[O F])=1;
+
+inter = zeros(ss,ss);
+inter(W,W)=1;
+inter(L,L)=1;
+inter(F,[W L O])=1;
+
+T = 3;
+dag = unroll_dbn_topology(intra, inter, T);
+
+% node sizes
+ns = zeros(1,ss);
+ns(W) = nwords;
+ns(L) = D;
+ns(F) = 2;
+ns(O) = alphasize;
+ns = repmat(ns(:), [1 T]);
+for d=1:D
+ ns(d,L)=d; % max duration
+end
+ns = ns(:);
+
+% Equiv class in brackets for D=3
+% The Lt's are not tied until t>=D, since they have different sizes.
+% W1 and W2 are not tied since they have different parent sets.
+
+% W1 (1) W2 (5) W3 (5) W4 (5)
+% L1 (2) L2 (6) L3 (7) L4 (7)
+% F1 (3) F2 (3) F3 (4) F3 (4)
+% O1 (4) O2 (4) O2 (4) O4 (4)
+
+% Since we are not learning, we can dispense with tying
+
+% Make the bnet
+Wnodes = unroll_set(W, ss, T);
+Lnodes = unroll_set(L, ss, T);
+Fnodes = unroll_set(F, ss, T);
+Onodes = unroll_set(O, ss, T);
+
+bnet = mk_bnet(dag, ns);
+eclass = bnet.equiv_class;
+
+% uniform start distrib over words, uniform trans mat
+Wstart = normalise(ones(1,nwords));
+Wtrans = mk_stochastic(ones(nwords,nwords));
+bnet.CPD{eclass(Wnodes(1))} = tabular_CPD(bnet, Wnodes(1), 'CPT', Wstart);
+for t=2:T
+bnet.CPD{eclass(Wnodes(t))} = hhmmQ_CPD(bnet, Wnodes(t), 'Fbelow', Fnodes(t-1), ...
+ 'startprob', Wstart, 'transprob', Wtrans);
+end
+
+% always start in state d = length(word) for each bottom level HMM
+% and then count down
+% make downcounters
+RLtrans = mk_rightleft_transmat(D, 0); % 0 self loop prob
+Ltrans = repmat(RLtrans, [1 1 nwords]);
+
+for t=1:T
+ Lstart = zeros(nwords, min(t,D));
+ for i=1:nwords
+ l = length(words{i});
+ Lstart(i,l)=1;
+ if t==1 % fixed: was 'd==1', but 'd' is stale here (left over from the earlier for-d loop); the slice-1 CPD branch should trigger on t==1
+ bnet.CPD{eclass(Lnodes(1))} = tabular_CPD(bnet, Lnodes(1), 'CPT', Lstart);
+ else
+ bnet.CPD{eclass(Lnodes(t))} = hhmmQ_CPD(bnet, Lnodes(t), 'Fself', Fnodes(t-1), 'Qps', Wnodes(t), ...
+ 'startprob', Lstart, 'transprob', Ltrans);
+ end
+ end
+end
+
+
+% Finish when downcounter = 1
+Fprob = zeros(nwords, D, 2);
+Fprob(:,1,2)=1;
+Fprob(:,2:end,1)=1;
+
+
+% Define CPDs for slice
+bnet.CPD{eclass(W,1)} = tabular_CPD(bnet, W, 'CPT', Wstart);
+bnet.CPD{eclass(L,1)} = tabular_CPD(bnet, L, 'CPT', Lstart);
+bnet.CPD{eclass(F,1)} = tabular_CPD(bnet, F, 'CPT', Fprob);
+
+
+% Define CPDs for slice 2
+bnet.CPD{eclass(W,2)} = hhmmQ_CPD(bnet, W+ss, 'Fbelow', F, 'startprob', Wstart, 'transprob', Wtrans);
+bnet.CPD{eclass(L,2)} = hhmmQ_CPD(bnet, L+ss, 'Fself', F, 'Qps', W+ss, 'startprob', Lstart, 'transprob', Ltrans);
+
+
+if 0
+% To test it is generating correctly, we create an artificial
+% observation process that capitalizes at the start of a new segment
+% Oprob(Ft-1,Qt,Dt,Yt)
+Oprob = zeros(2,nwords,D,alphasize);
+Oprob(1,1,3,letter2num('t'),1)=1;
+Oprob(1,1,2,letter2num('h'),1)=1;
+Oprob(1,1,1,letter2num('e'),1)=1;
+Oprob(2,1,3,letter2num('T'),1)=1;
+Oprob(2,1,2,letter2num('H'),1)=1;
+Oprob(2,1,1,letter2num('E'),1)=1;
+Oprob(1,2,1,letter2num('a'),1)=1;
+Oprob(2,2,1,letter2num('A'),1)=1;
+Oprob(1,3,1,letter2num('b'),1)=1;
+Oprob(2,3,1,letter2num('B'),1)=1;
+Oprob(1,4,1,letter2num('c'),1)=1;
+Oprob(2,4,1,letter2num('C'),1)=1;
+
+% Oprob1(Qt,Dt,Yt)
+Oprob1 = zeros(nwords,D,alphasize);
+Oprob1(1,3,letter2num('t'),1)=1;
+Oprob1(1,2,letter2num('h'),1)=1;
+Oprob1(1,1,letter2num('e'),1)=1;
+Oprob1(2,1,letter2num('a'),1)=1;
+Oprob1(3,1,letter2num('b'),1)=1;
+Oprob1(4,1,letter2num('c'),1)=1;
+
+bnet.CPD{eclass(O,2)} = tabular_CPD(bnet, O+ss, 'CPT', Oprob);
+bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', Oprob1);
+
+evidence = cell(ss,T);
+%evidence{W,1}=1;
+sample = cell2num(sample_dbn(bnet, 'length', T, 'evidence', evidence));
+str = num2letter(sample(4,:))
+end
+
+
+
+
+[log_obslik, obslik, match] = mk_mgram_obslik(lower(data), words, word_len, word_prob);
+% obslik(j,t,d)
+softCPDpot = cell(ss,T);
+ens = ns;
+ens(O)=1;
+ens2 = [ens ens];
+for t=2:T
+ dom = [F W+ss L+ss O+ss];
+ % tab(Ft-1, Q2, Dt)
+ tab = ones(2, nwords, D);
+ if past
+ tab(1,:,:)=1; % if haven't finished previous word, likelihood is 1
+ %tab(2,:,:) = squeeze(obslik(:,t,:)); % otherwise likelihood of this segment
+ for d=1:min(t,D)
+ tab(2,:,d) = squeeze(obslik(:,t,d));
+ end
+ else
+ for d=1:max(1,min(D,T+1-t))
+ tab(2,:,d) = squeeze(obslik(:,t+d-1,d));
+ end
+ end
+ softCPDpot{O,t} = dpot(dom, ens2(dom), tab);
+end
+t = 1;
+dom = [W L O];
+% tab(Q2, Dt)
+tab = ones(nwords, D);
+if past
+ %tab = squeeze(obslik(:,t,:));
+ tab(:,1) = squeeze(obslik(:,t,1));
+else
+ for d=1:min(D,T-t)
+ tab(:,d) = squeeze(obslik(:,t+d-1,d));
+ end
+end
+softCPDpot{O,t} = dpot(dom, ens(dom), tab);
+
+
+%bnet.observed = [];
+% uninformative observations
+%bnet.CPD{eclass(O,2)} = tabular_CPD(bnet, O+ss, 'CPT', mk_stochastic(ones(2,nwords,D,alphasize)));
+%bnet.CPD{eclass(O,1)} = tabular_CPD(bnet, O, 'CPT', mk_stochastic(ones(nwords,D,alphasize)));
+
+engine = jtree_dbn_inf_engine(bnet);
+evidence = cell(ss,T);
+% we add dummy data to O to force its effective size to be 1.
+% The actual values have already been incorporated into softCPDpot
+evidence(O,:) = num2cell(ones(1,T));
+[engine, ll_dbn] = enter_evidence(engine, evidence, 'softCPDpot', softCPDpot);
+
+
+%evidence(F,:) = num2cell(2*ones(1,T));
+%[engine, ll_dbn] = enter_evidence(engine, evidence);
+
+
+gamma = zeros(nwords, T);
+for t=1:T
+ m = marginal_nodes(engine, [W F], t);
+ gamma(:,t) = m.T(:,2);
+end
+
+gamma
+
+xidbn = zeros(nwords, nwords);
+for t=1:T-1
+ m = marginal_nodes(engine, [W F W+ss], t);
+ xidbn = xidbn + squeeze(m.T(:,2,:));
+end
+
+% thee
+% xidbn(1,4) = 0.9412 the->e
+% (2,3)=0.0588 t->h
+% (3,4)=0.0588 h-e
+% (4,4)=0.0588 e-e
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/num2letter.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Mgram/num2letter.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function l = num2letter(n)
+
+% map 1:26 to a-z and 27:52 to A-Z
+punct_code = [32:47 58:64 91:96 123:126];
+digits_code = 48:57;
+upper_code = 65:90;
+lower_code = 97:122;
+
+letters = [char(lower_code) char(upper_code)];
+l = letters(n);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/fixed_args_mk_motif_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/learn_motif_hhmm.m/1.1.1.1/Tue Jul 2 22:56:14 2002//
+/mk_motif_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_motif_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Motif
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/fixed_args_mk_motif_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/fixed_args_mk_motif_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,99 @@
+function bnet = fixed_args_mk_motif_hhmm(motif_length, motif_pattern, background_char)
+%
+% BNET = FIXED_ARGS_MK_MOTIF_HHMM(MOTIF_LENGTH)
+% Make the following HHMM
+%
+% S2 <----------------------> S1
+% | |
+% | |
+% M1 -> M2 -> M3 -> end B1 -> end
+%
+% where Mi represents the i'th letter in the motif
+% and B is the background state.
+% Si chooses between running the motif or the background.
+% The Si and B states have self loops (not shown).
+%
+% The transition params are defined to respect the above topology.
+% The background is uniform; each motif state has a random obs. distribution.
+%
+% BNET = FIXED_ARGS_MK_MOTIF_HHMM(MOTIF_LENGTH, MOTIF_PATTERN)
+% In this case, we make the motif submodel deterministically
+% emit the motif pattern.
+%
+% BNET = FIXED_ARGS_MK_MOTIF_HHMM(MOTIF_LENGTH, MOTIF_PATTERN, BACKGROUND_CHAR)
+% In this case, we make the background submodel
+% deterministically emit the specified character (to make the pattern
+% easier to see).
+
+if nargin < 2, motif_pattern = []; end
+if nargin < 3, background_char = []; end
+
+chars = ['a', 'c', 'g', 't'];
+Osize = length(chars);
+
+if ~isempty(motif_pattern), motif_length = length(motif_pattern); end % don't clobber the motif_length arg when no pattern is given
+Qsize = [2 motif_length];
+Qnodes = 1:2;
+D = 2;
+transprob = cell(1,D);
+termprob = cell(1,D);
+startprob = cell(1,D);
+
+% startprob{d}(k,j), startprob{1}(1,j)
+% transprob{d}(i,k,j), transprob{1}(i,j)
+% termprob{d}(k,j)
+
+
+% LEVEL 1
+
+startprob{1} = zeros(1, 2);
+startprob{1} = [1 0]; % always start in the background model
+
+% When in the background state, we stay there with high prob
+% When in the motif state, we immediately return to the background state.
+transprob{1} = [0.8 0.2;
+ 1.0 0.0];
+
+
+% LEVEL 2
+startprob{2} = 'leftstart'; % both submodels start in substate 1
+transprob{2} = zeros(motif_length, 2, motif_length);
+termprob{2} = zeros(2, motif_length);
+
+% In the background model, we only use state 1.
+transprob{2}(1,1,1) = 1; % self loop
+termprob{2}(1,1) = 0.2; % prob transition to end state
+
+% Motif model
+transprob{2}(:,2,:) = mk_leftright_transmat(motif_length, 0); % no self loops
+termprob{2}(2,end) = 1.0; % last state immediately terminates
+
+
+% OBS LEVEL
+
+obsprob = zeros([Qsize Osize]);
+if isempty(background_char)
+ % uniform background model
+ obsprob(1,1,:) = normalise(ones(Osize,1));
+else
+ % deterministic background model (easy to see!)
+ m = find(chars==background_char);
+ obsprob(1,1,m) = 1.0;
+end
+
+if ~isempty(motif_pattern)
+ % initialise with true motif (cheating)
+ for i=1:motif_length
+ m = find(chars == motif_pattern(i));
+ obsprob(2,i,m) = 1.0;
+ end
+else
+ obsprob(2,:,:) = mk_stochastic(ones(motif_length, Osize));
+end
+
+Oargs = {'CPT', obsprob};
+
+[bnet, Qnodes, Fnodes, Onode] = mk_hhmm('Qsizes', Qsize, 'Osize', Osize, 'discrete_obs', 1, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(1:2), ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/learn_motif_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/learn_motif_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,75 @@
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+chars = ['a', 'c', 'g', 't'];
+motif = 'accca';
+motif_length = length(motif);
+motif_code = zeros(1, motif_length);
+for i=1:motif_length
+ motif_code(i) = find(chars == motif(i));
+end
+
+[bnet_init, Qnodes, Fnodes, Onode] = mk_motif_hhmm('motif_length', length(motif));
+%[bnet_init, Qnodes, Fnodes, Onode] = mk_motif_hhmm('motif_pattern', motif);
+ss = bnet_init.nnodes_per_slice;
+
+
+
+% We generate a training set by creating uniform sequences,
+% and inserting a single motif at a random location.
+ntrain = 100;
+T = 20;
+cases = cell(1, ntrain);
+
+if 1
+ % uniform background
+ background_dist = normalise(ones(1, length(chars)));
+end
+if 0
+ % use a constant background
+ background_dist = zeros(1, length(chars));
+ m = find(chars=='t');
+ background_dist(m) = 1.0;
+end
+if 0
+ % use a background skewed away from the motif
+ p = 0.01; q = (1-(2*p))/2;
+ background_dist = [p p q q];
+end
+
+unif_pos = normalise(ones(1, T-length(motif)));
+cases = cell(1, ntrain);
+data = zeros(1,T);
+for i=1:ntrain
+ data = sample_discrete(background_dist, 1, T);
+ L = sample_discrete(unif_pos, 1, 1);
+ data(L:L+length(motif)-1) = motif_code;
+ cases{i} = cell(ss, T);
+ cases{i}(Onode,:) = num2cell(data);
+end
+disp('sample training cases')
+for i=1:5
+ chars(cell2num(cases{i}(Onode,:)))
+end
+
+engine_init = hmm_inf_engine(bnet_init);
+
+[bnet_learned, LL, engine_learned] = ...
+ learn_params_dbn_em(engine_init, cases, 'max_iter', 100, 'thresh', 1e-2);
+% 'anneal', 1, 'anneal_rate', 0.7);
+
+% extract the learned motif profile
+eclass = bnet_learned.equiv_class;
+CPDO=struct(bnet_learned.CPD{eclass(Onode,1)});
+fprintf('columns = chars, rows = states\n');
+profile_learned = squeeze(CPDO.CPT(2,:,:))
+[m,ndx] = max(profile_learned, [], 2);
+map_motif_learned = chars(ndx)
+back_learned = squeeze(CPDO.CPT(1,1,:))'
+%map_back_learned = chars(argmax(back_learned))
+
+CPDO_init = struct(bnet_init.CPD{eclass(Onode,1)});
+profile_init = squeeze(CPDO_init.CPT(2,:,:));
+back_init = squeeze(CPDO_init.CPT(1,1,:))';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/mk_motif_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/mk_motif_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,137 @@
+function [bnet, Qnodes, Fnodes, Onode] = mk_motif_hhmm(varargin)
+% [bnet, Qnodes, Fnodes, Onode] = mk_motif_hhmm(...)
+%
+% Make the following HHMM
+%
+% S2 <----------------------> S1
+% | |
+% | |
+% M1 -> M2 -> M3 -> end B1 -> end
+%
+% where Mi represents the i'th letter in the motif
+% and B is the background state.
+% Si chooses between running the motif or the background.
+% The Si and B states have self loops (not shown).
+%
+% The transition params are defined to respect the above topology.
+% The background is uniform; each motif state has a random obs. distribution.
+%
+% Optional params:
+% motif_length - required, unless we specify motif_pattern
+% motif_pattern - if specified, we make the motif submodel deterministically
+% emit this pattern
+% background - if specified, we make the background submodel
+% deterministically emit this (makes the motif easier to see!)
+
+
+args = varargin;
+nargs = length(args);
+
+% extract pattern, if any
+motif_pattern = [];
+for i=1:2:nargs
+ switch args{i},
+ case 'motif_pattern', motif_pattern = args{i+1};
+ end
+end
+
+% set defaults
+motif_length = length(motif_pattern);
+background_char = [];
+
+% get params
+for i=1:2:nargs
+ switch args{i},
+ case 'motif_length', motif_length = args{i+1};
+ case 'background', background_char = args{i+1};
+ end
+end
+
+
+chars = ['a', 'c', 'g', 't'];
+Osize = length(chars);
+
+Qsize = [2 motif_length];
+Qnodes = 1:2;
+D = 2;
+transprob = cell(1,D);
+termprob = cell(1,D);
+startprob = cell(1,D);
+
+% startprob{d}(k,j), startprob{1}(1,j)
+% transprob{d}(i,k,j), transprob{1}(i,j)
+% termprob{d}(k,j)
+
+
+% LEVEL 1
+
+startprob{1} = zeros(1, 2);
+startprob{1} = [1 0]; % always start in the background model
+
+% When in the background state, we stay there with high prob
+% When in the motif state, we immediately return to the background state.
+transprob{1} = [0.8 0.2;
+ 1.0 0.0];
+
+
+% LEVEL 2
+startprob{2} = 'leftstart'; % both submodels start in substate 1
+transprob{2} = zeros(motif_length, 2, motif_length);
+termprob{2} = zeros(2, motif_length);
+
+% In the background model, we only use state 1.
+transprob{2}(1,1,1) = 1; % self loop
+termprob{2}(1,1) = 0.2; % prob transition to end state
+
+% Motif model
+transprob{2}(:,2,:) = mk_leftright_transmat(motif_length, 0); % no self loops
+termprob{2}(2,end) = 1.0; % last state immediately terminates
+
+
+% OBS LEVEL
+
+obsprob = zeros([Qsize Osize]);
+if isempty(background_char)
+ % uniform background model
+ %obsprob(1,1,:) = normalise(ones(Osize,1));
+ obsprob(1,1,:) = normalise(rand(Osize,1));
+else
+ % deterministic background model (easy to see!)
+ m = find(chars==background_char);
+ obsprob(1,1,m) = 1.0;
+end
+
+if ~isempty(motif_pattern)
+ % initialise with true motif (cheating)
+ for i=1:motif_length
+ m = find(chars == motif_pattern(i));
+ obsprob(2,i,m) = 1.0;
+ end
+else
+ obsprob(2,:,:) = mk_stochastic(rand(motif_length, Osize));
+end
+
+if 0
+ Oargs = {'CPT', obsprob};
+else
+ % We use a minent prior for the emission distribution for the states in the motif model
+ % (but not the background model). This encourages nearly deterministic distributions.
+ % We create an index matrix (where M = motif length)
+ % [2 1
+ % 2 2
+ % ...
+ % 2 M]
+ % and then convert this to a list of integers, which
+ % specifies when to use the minent prior (Q1=2 specifies motif model).
+ M = motif_length;
+ ndx = [2*ones(M,1) (1:M)'];
+ pcases = subv2ind([2 motif_length], ndx);
+ Oargs = {'CPT', obsprob, 'prior_type', 'entropic', 'entropic_pcases', pcases};
+end
+
+
+
+[bnet, Qnodes, Fnodes, Onode] = mk_hhmm('Qsizes', Qsize, 'Osize', Osize, 'discrete_obs', 1, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(1:2), ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/sample_motif_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Motif/sample_motif_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+%bnet = mk_motif_hhmm('motif_pattern', 'acca', 'background', 't');
+bnet = mk_motif_hhmm('motif_pattern', 'accaggggga', 'background', []);
+
+chars = ['a', 'c', 'g', 't'];
+Tmax = 100;
+
+for seqi=1:5
+ evidence = cell2num(sample_dbn(bnet, 'length', Tmax));
+ chars(evidence(end,:))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/mk_abcd_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_arrow_alpha_hhmm3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_hhmm2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_hhmm3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_hhmm3_args.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/motif_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/remove_hhmm_end_state.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_abcd_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_abcd_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,109 @@
+% Make the HHMM in Figure 1 of the NIPS'01 paper
+
+Qsize = [2 3 2];
+D = 3;
+
+% transprob{d}(i,k,j), transprob{1}(i,j)
+% termprob{d}(k,j), termprob{1}(1,j)
+% startprob{d}(k,j), startprob{1}(1,j)
+% obsprob(k, o) for discrete outputs
+
+% LEVEL 1
+% 1 2 e
+A{1} = [0 0 1;
+ 0 0 1];
+[transprob{1}, termprob{1}] = remove_hhmm_end_state(A{1});
+startprob{1} = [0.5 0.5];
+Q1args = {'startprob', startprob{1}, 'transprob', transprob{1}};
+
+% LEVEL 2
+A{2} = zeros(Qsize(2), Qsize(1), Qsize(2)+1);
+
+% 1 2 3 e
+A{2}(:,1,:) = [0 1 0 0
+ 0 0 1 0
+ 0 0 0 1];
+
+% 1 2 3 e
+A{2}(:,2,:) = [0 1 0 0
+ 0 0 1 0
+ 0 0 0 1];
+
+[transprob{2}, termprob{2}] = remove_hhmm_end_state(A{2});
+
+% always enter level 2 in state 1
+startprob{2} = [1 0 0
+ 1 0 0];
+
+Q2args = {'startprob', startprob{2}, 'transprob', transprob{2}};
+F2args = {'CPT', termprob{2}};
+
+
+% LEVEL 3
+
+A{3} = zeros([Qsize(3) Qsize(1:2) Qsize(3)+1]);
+endstate = Qsize(3)+1;
+% Qt-1(3) Qt(1) Qt(2) Qt(3)
+% 1 2 e
+A{3}(1, 1, 1, endstate) = 1.0;
+A{3}(:, 1, 2, :) = [0.0 1.0 0.0
+ 0.5 0.0 0.5];
+A{3}(1, 1, 3, endstate) = 1.0;
+
+A{3}(1, 2, 1, endstate) = 1.0;
+A{3}(:, 2, 2, :) = [0.0 1.0 0.0
+ 0.5 0.0 0.5];
+A{3}(1, 2, 3, endstate) = 1.0;
+
+A{3} = reshape(A{3}, [Qsize(3) prod(Qsize(1:2)) Qsize(3)+1]);
+[transprob{3}, termprob{3}] = remove_hhmm_end_state(A{3});
+
+% define the vertical entry points to level 3
+startprob{3} = zeros(Qsize);
+% Q1 Q2 Q3
+startprob{3}(1, 1, 1) = 1.0;
+startprob{3}(1, 2, 1) = 1.0;
+startprob{3}(1, 3, 1) = 1.0;
+
+startprob{3}(2, 1, 1) = 1.0;
+startprob{3}(2, 2, 1) = 1.0;
+startprob{3}(2, 3, 1) = 1.0;
+
+startprob{3} = reshape(startprob{3}, prod(Qsize(1:2)), Qsize(3));
+
+chars = ['a', 'b', 'c', 'd', 'x', 'y'];
+Osize = length(chars);
+
+obsprob = zeros([Qsize Osize]);
+% 1 2 3 O
+obsprob(1,1,1,find(chars == 'a')) = 1.0;
+
+obsprob(1,2,1,find(chars == 'x')) = 1.0;
+obsprob(1,2,2,find(chars == 'y')) = 1.0;
+
+obsprob(1,3,1,find(chars == 'b')) = 1.0;
+
+obsprob(2,1,1,find(chars == 'c')) = 1.0;
+
+obsprob(2,2,1,find(chars == 'x')) = 1.0;
+obsprob(2,2,2,find(chars == 'y')) = 1.0;
+
+obsprob(2,3,1,find(chars == 'd')) = 1.0;
+
+obsprob = reshape(obsprob, prod(Qsize), Osize);
+
+[intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo(D);
+
+hhmm.Qnodes = Qnodes;
+hhmm.Fnodes = Fnodes;
+hhmm.Onode = Onode;
+hhmm.D = D;
+hhmm.Qsize = Qsize;
+hhmm.Osize = Osize;
+hhmm.startprob = startprob;
+hhmm.transprob = transprob;
+hhmm.termprob = termprob;
+hhmm.obsprob = obsprob;
+hhmm.A = A;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_arrow_alpha_hhmm3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_arrow_alpha_hhmm3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+% Make the following HHMM
+%
+% LH RH
+% / \
+% / \
+% LR -> UD -> RL -> DU RL -> UD -> LR -> DU
+% \
+% \
+% Q1 -> Q2
+%
+% where level 1 is fully interconnected (not shown)
+% level 2 is left-right
+% and each model at level 3 is a 2 state LR shared HMM
+
+Qsizes = [2 4 2];
+D = 3;
+
+% LEVEL 1
+
+startprob1 = 'ergodic';
+transprob1 = 'ergodic';
+
+
+% LEVEL 2
+
+startprob = zeros(2, 4);
+% Q1 Q2
+startprob(1, 1) = 1;
+startprob(2, 3) = 1;
+
+transprob = zeros(2, 4, 4);
+transprob(1,:,:) = [0 1 0 0
+ 0 0 1 0
+ 0 0 0 1
+ 0 0 0 1];
+transprob(2,:,:) = [0 0 0 1
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 1];
+
+Q2args = {'startprob', startprob, 'transprob', transprob};
+
+% always terminate in state 4 (default)
+% F2args
+
+% LEVEL 3
+
+% Defaults are fine: always start in state 1, left-right model, finish in state 2
+
+
+% OBS LEVEL
+
+chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+Osize = length(chars);
+
+obsprob = zeros([4 2 Osize]);
+% Q2 Q3 O
+obsprob(1, 1, find(chars == 'L')) = 1.0;
+obsprob(1, 2, find(chars == 'l')) = 1.0;
+
+obsprob(2, 1, find(chars == 'U')) = 1.0;
+obsprob(2, 2, find(chars == 'u')) = 1.0;
+
+obsprob(3, 1, find(chars == 'R')) = 1.0;
+obsprob(3, 2, find(chars == 'r')) = 1.0;
+
+obsprob(4, 1, find(chars == 'D')) = 1.0;
+obsprob(4, 2, find(chars == 'd')) = 1.0;
+
+Oargs = {'CPT', obsprob};
+
+
+bnet = mk_hhmm3('Qsizes', Qsizes, 'Osize', Osize', 'discrete_obs', 1, 'Oargs', Oargs, 'Q1args', Q1args, 'Q2args', Q2args);
+
+T = 20;
+usecell = 0;
+evidence = sample_dbn(bnet, T, usecell);
+%chars(evidence(end,:))
+
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; obs = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+
+pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, obs, chars);
+
+eclass = bnet.equiv_class;
+S=struct(bnet.CPD{eclass(Q2,2)})
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_hhmm2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_hhmm2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,111 @@
+function bnet = mk_hhmm2(varargin)
+% MK_HHMM2 Make a 2 level Hierarchical HMM
+% bnet = mk_hhmm2(...)
+%
+% 2-layer hierarchical HMM (node numbers in parens)
+%
+% Q1(1) ---------> Q1(5)
+% / | \ / |
+% | | v / |
+% | | F2(3) --- / |
+% | | ^ \ |
+% | | / \ |
+% | v \ v
+% | Q2(2)--------> Q2 (6)
+% | |
+% \ |
+% v v
+% O(4)
+%
+%
+% Optional arguments [default]
+%
+% discrete_obs - 1 means O is tabular_CPD, 0 means O is gaussian_CPD [0]
+% obsCPT - CPT(o,q1,q2) params for O ['rnd']
+% mu - mu(:,q1,q2) params for O [ [] ]
+% Sigma - Sigma(:,q1,q2) params for O [ [] ]
+%
+% F2toQ2 - 1 if Q2 is an hhmm_CPD, 0 if F2 -> Q2 arc is absent, so level 2 never resets [1]
+% Q1args - arguments to be passed to the constructors for Q1(t=2) [ {} ]
+% Q2args - arguments to be passed to the constructors for Q2(t=2) [ {} ]
+%
+% F2 only turns on (wp 0.5) when Q2 enters its final state.
+% Q1 (slice 1) is clamped to be uniform.
+% Q2 (slice 1) is clamped to always start in state 1.
+
+[os nmodels nstates] = size(mu);
+
+ss = 4;
+Q1 = 1; Q2 = 2; F2 = 3; obs = 4;
+Qnodes = [Q1 Q2];
+names = {'Q1', 'Q2', 'F2', 'obs'};
+intra = zeros(ss);
+intra(Q1, [Q2 F2 obs]) = 1;
+intra(Q2, [F2 obs]) = 1;
+
+inter = zeros(ss);
+inter(Q1,Q1) = 1;
+inter(F2,Q1) = 1;
+if F2toQ2
+ inter(F2,Q2)=1;
+end
+inter(Q2,Q2) = 1;
+
+ns = zeros(1,ss);
+
+ns(Q1) = nmodels;
+ns(Q2) = nstates;
+ns(F2) = 2;
+ns(obs) = os;
+
+dnodes = [Q1 Q2 F2];
+if discrete_obs
+ dnodes = [dnodes obs];
+end
+onodes = [obs];
+
+bnet = mk_dbn(intra, inter, ns, 'observed', onodes, 'discrete', dnodes, 'names', names);
+eclass = bnet.equiv_class;
+
+% SLICE 1
+
+% We clamp untied nodes in the first slice, since their params can't be estimated
+% from just one sequence
+
+% uniform prior on initial model
+CPT = normalise(ones(1,nmodels));
+bnet.CPD{eclass(Q1,1)} = tabular_CPD(bnet, Q1, 'CPT', CPT, 'adjustable', 0);
+
+% each model always starts in state 1
+CPT = zeros(ns(Q1), ns(Q2));
+CPT(:, 1) = 1.0;
+bnet.CPD{eclass(Q2,1)} = tabular_CPD(bnet, Q2, 'CPT', CPT, 'adjustable', 0);
+
+% Termination probability
+CPT = zeros(ns(Q1), ns(Q2), 2);
+if 1
+ % Each model can only terminate in its final state.
+ % 0 params will remain 0 during EM, thus enforcing this constraint.
+ CPT(:, :, 1) = 1.0; % all states turn F off ...
+ p = 0.5;
+ CPT(:, ns(Q2), 2) = p; % except the last one
+ CPT(:, ns(Q2), 1) = 1-p;
+end
+bnet.CPD{eclass(F2,1)} = tabular_CPD(bnet, F2, 'CPT', CPT);
+
+if discrete_obs
+ bnet.CPD{eclass(obs,1)} = tabular_CPD(bnet, obs, obs_args{:});
+else
+ bnet.CPD{eclass(obs,1)} = gaussian_CPD(bnet, obs, obs_args{:});
+end
+
+% SLICE 2
+
+
+bnet.CPD{eclass(Q1,2)} = hhmm_CPD(bnet, Q1+ss, Qnodes, 1, D, 'args', Q1args);
+
+if F2toQ2
+ bnet.CPD{eclass(Q2,2)} = hhmmQD_CPD(bnet, Q2+ss, Qnodes, 2, D, Q2args{:});
+else
+ bnet.CPD{eclass(Q2,2)} = tabular_CPD(bnet, Q2+ss, Q2args{:});
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_hhmm3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_hhmm3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+function bnet = mk_hhmm3(varargin)
+% MK_HHMM3 Make a 3 level Hierarchical HMM
+% bnet = mk_hhmm3(...)
+%
+% 3-layer hierarchical HMM where level 1 only connects to level 2, not 3 or obs.
+% This enforces sub-models (which differ only in their Q1 index) to be shared.
+% Also, we enforce the fact that each model always starts in its initial state
+% and only finishes in its final state. However, the prob. of finishing (as opposed to
+% self-transitioning to the final state) can be learned.
+% The fact that we always finish from the same state means we do not need to condition
+% F(i) on Q(i-1), since finishing prob is indep of calling context.
+%
+% The DBN is the same as Fig 10 in my tech report.
+%
+% Q1 ----------> Q1
+% | / |
+% | / |
+% | F2 ------- |
+% | ^ \ |
+% | /| \ |
+% v | v v
+% Q2-| --------> Q2
+% /| | ^
+% / | | /|
+% | | F3 ---------/ |
+% | | ^ \ |
+% | v / v
+% | Q3 -----------> Q3
+% | |
+% \ |
+% v v
+% O
+%
+%
+% Optional arguments in name/value format [default]
+%
+% Qsizes - sizes at each level [ none ]
+% Osize - size of O node [ none ]
+% discrete_obs - 1 means O is tabular_CPD, 0 means O is gaussian_CPD [0]
+% Oargs - cell array of args to pass to the O CPD [ {} ]
+% transprob1 - transprob1(i,j) = P(Q1(t)=j|Q1(t-1)=i) ['ergodic']
+% startprob1 - startprob1(j) = P(Q1(t)=j) ['leftstart']
+% transprob2 - transprob2(i,k,j) = P(Q2(t)=j|Q2(t-1)=i,Q1(t)=k) ['leftright']
+% startprob2 - startprob2(k,j) = P(Q2(t)=j|Q1(t)=k) ['leftstart']
+% termprob2 - termprob2(j,f) = P(F2(t)=f|Q2(t)=j) ['rightstop']
+% transprob3 - transprob3(i,k,j) = P(Q3(t)=j|Q3(t-1)=i,Q2(t)=k) ['leftright']
+% startprob3 - startprob3(k,j) = P(Q3(t)=j|Q2(t)=k) ['leftstart']
+% termprob3 - termprob3(j,f) = P(F3(t)=f|Q3(t)=j) ['rightstop']
+%
+% leftstart means the model always starts in state 1.
+% rightstop means the model always finished in its last state (Qsize(d)).
+%
+% Q1:Q3 in slice 1 are of type tabular_CPD
+% Q1:Q3 in slice 2 are of type hhmmQ_CPD.
+% F2 is of type hhmmF_CPD, F3 is of type tabular_CPD.
+
+ss = 6; D = 3;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; obs = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+names = {'Q1', 'Q2', 'Q3', 'F3', 'F2', 'obs'};
+
+intra = zeros(ss);
+intra(Q1, Q2) = 1;
+intra(Q2, [F2 Q3 obs]) = 1;
+intra(Q3, [F3 obs]) = 1;
+intra(F3, F2) = 1;
+
+inter = zeros(ss);
+inter(Q1,Q1) = 1;
+inter(Q2,Q2) = 1;
+inter(Q3,Q3) = 1;
+inter(F2,[Q1 Q2]) = 1;
+inter(F3,[Q2 Q3]) = 1;
+
+
+% get sizes of nodes
+args = varargin;
+nargs = length(args);
+Qsizes = [];
+Osize = 0;
+for i=1:2:nargs
+ switch args{i},
+ case 'Qsizes', Qsizes = args{i+1};
+ case 'Osize', Osize = args{i+1};
+ end
+end
+if isempty(Qsizes), error('must specify Qsizes'); end
+if Osize==0, error('must specify Osize'); end
+
+% set default params
+discrete_obs = 0;
+Oargs = {};
+startprob1 = 'ergodic';
+startprob2 = 'leftstart';
+startprob3 = 'leftstart';
+transprob1 = 'ergodic';
+transprob2 = 'leftright';
+transprob3 = 'leftright';
+termprob2 = 'rightstop';
+termprob3 = 'rightstop';
+
+
+for i=1:2:nargs
+ switch args{i},
+ case 'discrete_obs', discrete_obs = args{i+1};
+ case 'Oargs', Oargs = args{i+1};
+ case 'Q1args', Q1args = args{i+1};
+ case 'Q2args', Q2args = args{i+1};
+ case 'Q3args', Q3args = args{i+1};
+ case 'F2args', F2args = args{i+1};
+ case 'F3args', F3args = args{i+1};
+ end
+end
+
+
+ns = zeros(1,ss);
+ns(Qnodes) = Qsizes;
+ns(obs) = Osize;
+ns(Fnodes) = 2;
+
+dnodes = [Qnodes Fnodes];
+if discrete_obs
+ dnodes = [dnodes obs];
+end
+onodes = [obs];
+
+bnet = mk_dbn(intra, inter, ns, 'observed', onodes, 'discrete', dnodes, 'names', names);
+eclass = bnet.equiv_class;
+
+if strcmp(startprob1, 'ergodic')
+ startprob1 = normalise(ones(1,ns(Q1)));
+end
+if strcmp(startprob2, 'leftstart')
+ startprob2 = zeros(ns(Q1), ns(Q2));
+  startprob2(:, 1) = 1.0;
+end
+if strcmp(startprob3, 'leftstart')
+ startprob3 = zeros(ns(Q2), ns(Q3));
+  startprob3(:, 1) = 1.0;
+end
+
+if strcmp(termprob2, 'rightstop')
+ p = 0.9;
+  termprob2 = zeros(Qsizes(2),2);
+ termprob2(:, 2) = p;
+ termprob2(:, 1) = 1-p;
+  termprob2(1:(Qsizes(2)-1), 1) = 1;
+end
+if strcmp(termprob3, 'rightstop')
+ p = 0.9;
+  termprob3 = zeros(Qsizes(3),2);
+ termprob3(:, 2) = p;
+ termprob3(:, 1) = 1-p;
+  termprob3(1:(Qsizes(3)-1), 1) = 1;
+end
+
+
+% SLICE 1
+
+% We clamp untied nodes in the first slice, since their params can't be estimated
+% from just one sequence
+
+bnet.CPD{eclass(Q1,1)} = tabular_CPD(bnet, Q1, 'CPT', startprob1, 'adjustable', 0);
+bnet.CPD{eclass(Q2,1)} = tabular_CPD(bnet, Q2, 'CPT', startprob2, 'adjustable', 0);
+bnet.CPD{eclass(Q3,1)} = tabular_CPD(bnet, Q3, 'CPT', startprob3, 'adjustable', 0);
+
+bnet.CPD{eclass(F2,1)} = hhmmF_CPD(bnet, F2, Qnodes, 2, D, 'termprob', termprob2);
+bnet.CPD{eclass(F3,1)} = tabular_CPD(bnet, F3, 'CPT', termprob3);
+
+if discrete_obs
+ bnet.CPD{eclass(obs,1)} = tabular_CPD(bnet, obs, Oargs{:});
+else
+ bnet.CPD{eclass(obs,1)} = gaussian_CPD(bnet, obs, Oargs{:});
+end
+
+% SLICE 2
+
+bnet.CPD{eclass(Q1,2)} = hhmmQ_CPD(bnet, Q1+ss, Qnodes, 1, D, 'transprob', transprob1, 'startprob', startprob1);
+bnet.CPD{eclass(Q2,2)} = hhmmQ_CPD(bnet, Q2+ss, Qnodes, 2, D, 'transprob', transprob2, 'startprob', startprob2);
+bnet.CPD{eclass(Q3,2)} = hhmmQ_CPD(bnet, Q3+ss, Qnodes, 3, D, 'transprob', transprob3, 'startprob', startprob3);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_hhmm3_args.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/mk_hhmm3_args.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,165 @@
+function bnet = mk_hhmm3(varargin)
+% MK_HHMM3 Make a 3 level Hierarchical HMM
+% bnet = mk_hhmm3(...)
+%
+% 3-layer hierarchical HMM where level 1 only connects to level 2, not 3 or obs.
+% This enforces sub-models (which differ only in their Q1 index) to be shared.
+% Also, we enforce the fact that each model always starts in its initial state
+% and only finishes in its final state. However, the prob. of finishing (as opposed to
+% self-transitioning to the final state) can be learned.
+% The fact that we always finish from the same state means we do not need to condition
+% F(i) on Q(i-1), since finishing prob is indep of calling context.
+%
+% The DBN is the same as Fig 10 in my tech report.
+%
+% Q1 ----------> Q1
+% | / |
+% | / |
+% | F2 ------- |
+% | ^ \ |
+% | /| \ |
+% v | v v
+% Q2-| --------> Q2
+% /| | ^
+% / | | /|
+% | | F3 ---------/ |
+% | | ^ \ |
+% | v / v
+% | Q3 -----------> Q3
+% | |
+% \ |
+% v v
+% O
+%
+% Q1 (slice 1) is clamped to be uniform.
+% Q2 (slice 1) is clamped to always start in state 1.
+% Q3 (slice 1) is clamped to always start in state 1.
+% F3 by default will only finish if Q3 is in its last state (F3 is a tabular_CPD)
+% F2 by default gets the default hhmmF_CPD params.
+% Q1:Q3 (slice 2) by default gets the default hhmmQ_CPD params.
+% O by default gets the default tabular/Gaussian params.
+%
+% Optional arguments in name/value format [default]
+%
+% Qsizes - sizes at each level [ none ]
+% Osize - size of O node [ none ]
+% discrete_obs - 1 means O is tabular_CPD, 0 means O is gaussian_CPD [0]
+% Oargs - cell array of args to pass to the O CPD [ {} ]
+% Q1args - args to be passed to constructor for Q1 (slice 2) [ {} ]
+% Q2args - args to be passed to constructor for Q2 (slice 2) [ {} ]
+% Q3args - args to be passed to constructor for Q3 (slice 2) [ {} ]
+% F2args - args to be passed to constructor for F2 [ {} ]
+% F3args - args to be passed to constructor for F3 [ {'CPT', finish in last Q3 state} ]
+%
+
+ss = 6; D = 3;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; obs = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+names = {'Q1', 'Q2', 'Q3', 'F3', 'F2', 'obs'};
+
+intra = zeros(ss);
+intra(Q1, Q2) = 1;
+intra(Q2, [F2 Q3 obs]) = 1;
+intra(Q3, [F3 obs]) = 1;
+intra(F3, F2) = 1;
+
+inter = zeros(ss);
+inter(Q1,Q1) = 1;
+inter(Q2,Q2) = 1;
+inter(Q3,Q3) = 1;
+inter(F2,[Q1 Q2]) = 1;
+inter(F3,[Q2 Q3]) = 1;
+
+
+% get sizes of nodes
+args = varargin;
+nargs = length(args);
+Qsizes = [];
+Osize = 0;
+for i=1:2:nargs
+ switch args{i},
+ case 'Qsizes', Qsizes = args{i+1};
+ case 'Osize', Osize = args{i+1};
+ end
+end
+if isempty(Qsizes), error('must specify Qsizes'); end
+if Osize==0, error('must specify Osize'); end
+
+% set default params
+discrete_obs = 0;
+Oargs = {};
+Q1args = {};
+Q2args = {};
+Q3args = {};
+F2args = {};
+
+% P(Q3, F3)
+CPT = zeros(Qsizes(3), 2);
+% Each model can only terminate in its final state.
+% 0 params will remain 0 during EM, thus enforcing this constraint.
+CPT(:, 1) = 1.0; % all states turn F off ...
+p = 0.5;
+CPT(Qsizes(3), 2) = p; % except the last one
+CPT(Qsizes(3), 1) = 1-p;
+F3args = {'CPT', CPT};
+
+for i=1:2:nargs
+ switch args{i},
+ case 'discrete_obs', discrete_obs = args{i+1};
+ case 'Oargs', Oargs = args{i+1};
+ case 'Q1args', Q1args = args{i+1};
+ case 'Q2args', Q2args = args{i+1};
+ case 'Q3args', Q3args = args{i+1};
+ case 'F2args', F2args = args{i+1};
+ case 'F3args', F3args = args{i+1};
+ end
+end
+
+ns = zeros(1,ss);
+ns(Qnodes) = Qsizes;
+ns(obs) = Osize;
+ns(Fnodes) = 2;
+
+dnodes = [Qnodes Fnodes];
+if discrete_obs
+ dnodes = [dnodes obs];
+end
+onodes = [obs];
+
+bnet = mk_dbn(intra, inter, ns, 'observed', onodes, 'discrete', dnodes, 'names', names);
+eclass = bnet.equiv_class;
+
+% SLICE 1
+
+% We clamp untied nodes in the first slice, since their params can't be estimated
+% from just one sequence
+
+% uniform prior on initial model
+CPT = normalise(ones(1,ns(Q1)));
+bnet.CPD{eclass(Q1,1)} = tabular_CPD(bnet, Q1, 'CPT', CPT, 'adjustable', 0);
+
+% each model always starts in state 1
+CPT = zeros(ns(Q1), ns(Q2));
+CPT(:, 1) = 1.0;
+bnet.CPD{eclass(Q2,1)} = tabular_CPD(bnet, Q2, 'CPT', CPT, 'adjustable', 0);
+
+% each model always starts in state 1
+CPT = zeros(ns(Q2), ns(Q3));
+CPT(:, 1) = 1.0;
+bnet.CPD{eclass(Q3,1)} = tabular_CPD(bnet, Q3, 'CPT', CPT, 'adjustable', 0);
+
+bnet.CPD{eclass(F2,1)} = hhmmF_CPD(bnet, F2, Qnodes, 2, D, F2args{:});
+
+bnet.CPD{eclass(F3,1)} = tabular_CPD(bnet, F3, F3args{:});
+
+if discrete_obs
+ bnet.CPD{eclass(obs,1)} = tabular_CPD(bnet, obs, Oargs{:});
+else
+ bnet.CPD{eclass(obs,1)} = gaussian_CPD(bnet, obs, Oargs{:});
+end
+
+% SLICE 2
+
+bnet.CPD{eclass(Q1,2)} = hhmmQ_CPD(bnet, Q1+ss, Qnodes, 1, D, Q1args{:});
+bnet.CPD{eclass(Q2,2)} = hhmmQ_CPD(bnet, Q2+ss, Qnodes, 2, D, Q2args{:});
+bnet.CPD{eclass(Q3,2)} = hhmmQ_CPD(bnet, Q3+ss, Qnodes, 3, D, Q3args{:});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/motif_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/motif_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,95 @@
+% Make the following HHMM
+%
+% S1 <----------------------> S2
+% | |
+% | |
+% M1 -> M2 -> M3 -> end B1 -> end
+%
+% where Mi represents the i'th letter in the motif
+% and B is the background state.
+% Si chooses between running the motif or the background.
+% The Si and B states have self loops (not shown).
+
+if 0
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+end
+
+chars = ['a', 'c', 'g', 't'];
+Osize = length(chars);
+
+motif_pattern = 'acca';
+motif_length = length(motif_pattern);
+Qsize = [2 motif_length];
+Qnodes = 1:2;
+D = 2;
+transprob = cell(1,D);
+termprob = cell(1,D);
+startprob = cell(1,D);
+
+% startprob{d}(k,j), startprob{1}(1,j)
+% transprob{d}(i,k,j), transprob{1}(i,j)
+% termprob{d}(k,j)
+
+
+% LEVEL 1
+
+startprob{1} = zeros(1, 2);
+startprob{1} = [1 0]; % always start in the background model
+
+% When in the background state, we stay there with high prob
+% When in the motif state, we immediately return to the background state.
+transprob{1} = [0.8 0.2;
+ 1.0 0.0];
+
+
+% LEVEL 2
+startprob{2} = 'leftstart'; % both submodels start in substate 1
+transprob{2} = zeros(motif_length, 2, motif_length);
+termprob{2} = zeros(2, motif_length);
+
+% In the background model, we only use state 1.
+transprob{2}(1,1,1) = 1; % self loop
+termprob{2}(1,1) = 0.2; % prob transition to end state
+
+% Motif model
+transprob{2}(:,2,:) = mk_leftright_transmat(motif_length, 0);
+termprob{2}(2,end) = 1.0; % last state immediately terminates
+
+
+% OBS LEVEL
+
+obsprob = zeros([Qsize Osize]);
+if 0
+ % uniform background model
+ obsprob(1,1,:) = normalise(ones(Osize,1));
+else
+ % deterministic background model (easy to see!)
+ m = find(chars=='t');
+ obsprob(1,1,m) = 1.0;
+end
+if 1
+ % initialise with true motif (cheating)
+ for i=1:motif_length
+ m = find(chars == motif_pattern(i));
+ obsprob(2,i,m) = 1.0;
+ end
+end
+
+Oargs = {'CPT', obsprob};
+
+[bnet, Qnodes, Fnodes, Onode] = mk_hhmm('Qsizes', Qsize, 'Osize', Osize, 'discrete_obs', 1, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(1:2), ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
+
+Tmax = 20;
+usecell = 0;
+
+for seqi=1:5
+ evidence = sample_dbn(bnet, Tmax, usecell);
+ chars(evidence(end,:))
+ %T = size(evidence, 2)
+ %pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, Onode, chars);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/remove_hhmm_end_state.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Old/remove_hhmm_end_state.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function [transprob, termprob] = remove_hhmm_end_state(A)
+% REMOVE_END_STATE Infer transition and termination probabilities from automaton with an end state
+% [transprob, termprob] = remove_end_state(A)
+% A(i,k,j) = Pr( i->j | Qps=k), where i in 1:Q, j in 1:(Q+1), and Q+1 is the end state
+
+if ndims(A)==2 % top level
+ Q = size(A,1);
+ transprob = A(:,1:Q);
+ termprob = A(:,Q+1)';
+
+ % rescale
+ for i=1:Q
+ for j=1:Q
+ denom = (1-termprob(i));
+ denom = denom + (denom==0)*eps;
+ transprob(i,j) = transprob(i,j) / denom;
+ end
+ end
+else
+ Q = size(A,1);
+ Qk = size(A,2);
+ transprob = A(:, :, 1:Q);
+ termprob = A(:,:,Q+1)';
+
+ % rescale
+ for k=1:Qk
+ for i=1:Q
+ for j=1:Q
+ denom = (1-termprob(k,i));
+ denom = denom + (denom==0)*eps;
+ transprob(i,k,j) = transprob(i,k,j) / denom;
+ end
+ end
+ end
+
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+/get_square_data.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hhmm_inference.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/is_F2_true_D3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/learn_square_hhmm_cts.m/1.1.1.1/Thu Jun 20 00:19:22 2002//
+/learn_square_hhmm_discrete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_square_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/plot_square_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_square_hhmm_cts.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_square_hhmm_discrete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/square4.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+/square4_cases.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+/test_square_fig.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/test_square_fig.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Square
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/learn_square_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_square_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/plot_square_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_square_hhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/HHMM/Square/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/learn_square_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/learn_square_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,294 @@
+% Learn a 3 level HHMM similar to mk_square_hhmm
+
+% Because startprob should be shared for t=1:T,
+% but in the DBN is shared for t=2:T, we train using a single long sequence.
+
+discrete_obs = 0;
+supervised = 1;
+obs_finalF2 = 0;
+% It is not possible to observe F2 if we learn
+% because the update_ess method for hhmmF_CPD and hhmmQ_CPD assume
+% the F nodes are always hidden (for speed).
+% However, for generating, we might want to set the final F2=true
+% to force all subroutines to finish.
+
+ss = 6;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+if discrete_obs
+ Qsizes = [2 4 2];
+else
+ Qsizes = [2 4 1];
+end
+
+D = 3;
+Qnodes = 1:D;
+startprob = cell(1,D);
+transprob = cell(1,D);
+termprob = cell(1,D);
+
+startprob{1} = 'unif';
+transprob{1} = 'unif';
+
+% In the unsupervised case, it is essential that we break symmetry
+% in the initial param estimates.
+%startprob{2} = 'unif';
+%transprob{2} = 'unif';
+%termprob{2} = 'unif';
+startprob{2} = 'rnd';
+transprob{2} = 'rnd';
+termprob{2} = 'rnd';
+
+leftright = 0;
+if leftright
+ % Initialise base-level models as left-right.
+ % If we initialise with delta functions,
+% they will remain delta functions after learning
+ startprob{3} = 'leftstart';
+ transprob{3} = 'leftright';
+ termprob{3} = 'rightstop';
+else
+ % If we want to be able to run a base-level model backwards...
+ startprob{3} = 'rnd';
+ transprob{3} = 'rnd';
+ termprob{3} = 'rnd';
+end
+
+if discrete_obs
+ % Initialise observations of lowest level primitives in a way which we can interpret
+ chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+ L=find(chars=='L'); l=find(chars=='l');
+ U=find(chars=='U'); u=find(chars=='u');
+ R=find(chars=='R'); r=find(chars=='r');
+ D=find(chars=='D'); d=find(chars=='d');
+ Osize = length(chars);
+
+ p = 0.9;
+ obsprob = (1-p)*ones([4 2 Osize]);
+ % Q2 Q3 O
+ obsprob(1, 1, L) = p;
+ obsprob(1, 2, l) = p;
+ obsprob(2, 1, U) = p;
+ obsprob(2, 2, u) = p;
+ obsprob(3, 1, R) = p;
+ obsprob(3, 2, r) = p;
+ obsprob(4, 1, D) = p;
+ obsprob(4, 2, d) = p;
+ obsprob = mk_stochastic(obsprob);
+ Oargs = {'CPT', obsprob};
+
+else
+ % Initialise means of lowest level primitives in a way which we can interpret
+ % These means are little vectors in the east, south, west, north directions.
+ % (left-right=east, up-down=south, right-left=west, down-up=north)
+ Osize = 2;
+ mu = zeros(2, Qsizes(2), Qsizes(3));
+ noise = 0;
+ scale = 3;
+ for q3=1:Qsizes(3)
+ mu(:, 1, q3) = scale*[1;0] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 2, q3) = scale*[0;-1] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 3, q3) = scale*[-1;0] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 4, q3) = scale*[0;1] + noise*rand(2,1);
+ end
+ Sigma = repmat(reshape(scale*eye(2), [2 2 1 1 ]), [1 1 Qsizes(2) Qsizes(3)]);
+ Oargs = {'mean', mu, 'cov', Sigma, 'cov_type', 'diag'};
+end
+
+bnet = mk_hhmm('Qsizes', Qsizes, 'Osize', Osize', 'discrete_obs', discrete_obs,...
+ 'Oargs', Oargs, 'Ops', Qnodes(2:3), ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
+if supervised
+ bnet.observed = [Q1 Q2 Onode];
+else
+ bnet.observed = [Onode];
+end
+
+if obs_finalF2
+ engine = jtree_dbn_inf_engine(bnet);
+ % can't use ndx version because sometimes F2 is hidden, sometimes observed
+ error('can''t observe F when learning')
+else
+ if supervised
+ engine = jtree_ndx_dbn_inf_engine(bnet);
+ else
+ engine = jtree_hmm_inf_engine(bnet);
+ end
+end
+
+if discrete_obs
+ % generate some synthetic data (easier to debug)
+ cases = {};
+
+ T = 8;
+ ev = cell(ss, T);
+ ev(Onode,:) = num2cell([L l U u R r D d]);
+ if supervised
+ ev(Q1,:) = num2cell(1*ones(1,T));
+ ev(Q2,:) = num2cell( [1 1 2 2 3 3 4 4]);
+ end
+ cases{1} = ev;
+ cases{3} = ev;
+
+ T = 8;
+ ev = cell(ss, T);
+ if leftright % base model is left-right
+ ev(Onode,:) = num2cell([R r U u L l D d]);
+ else
+ ev(Onode,:) = num2cell([r R u U l L d D]);
+ end
+ if supervised
+ ev(Q1,:) = num2cell(2*ones(1,T));
+ ev(Q2,:) = num2cell( [3 3 2 2 1 1 4 4]);
+ end
+
+ cases{2} = ev;
+ cases{4} = ev;
+
+ if obs_finalF2
+ for i=1:length(cases)
+ T = size(cases{i},2);
+ cases{i}(F2,T)={2}; % force F2 to be finished at end of seq
+ end
+ end
+
+ if 0
+ ev = cases{4};
+ engine2 = enter_evidence(engine2, ev);
+ T = size(ev,2);
+ for t=1:T
+ m=marginal_family(engine2, F2, t);
+ fprintf('t=%d\n', t);
+ reshape(m.T, [2 2])
+ end
+ end
+
+ % [bnet2, LL] = learn_params_dbn_em(engine, cases, 'max_iter', 10);
+ long_seq = cat(2, cases{:});
+ [bnet2, LL, engine2] = learn_params_dbn_em(engine, {long_seq}, 'max_iter', 200);
+
+ % figure out which subsequence each model is responsible for
+ mpe = calc_mpe_dbn(engine2, long_seq);
+ pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, chars);
+
+else
+ load 'square4_cases' % cases{seq}{i,t} for i=1:ss
+ %plot_square_hhmm(cases{1})
+ %long_seq = cat(2, cases{:});
+ train_cases = cases(1:2);
+ long_seq = cat(2, train_cases{:});
+ if ~supervised
+ T = size(long_seq,2);
+ for t=1:T
+ long_seq{Q1,t} = [];
+ long_seq{Q2,t} = [];
+ end
+ end
+ [bnet2, LL, engine2] = learn_params_dbn_em(engine, {long_seq}, 'max_iter', 100);
+
+ CPDO=struct(bnet2.CPD{eclass(Onode,1)});
+ mu = CPDO.mean;
+ Sigma = CPDO.cov;
+ CPDO_full = CPDO;
+
+ % force diagonal covs after training
+ for k=1:size(Sigma,3)
+ Sigma(:,:,k) = diag(diag(Sigma(:,:,k)));
+ end
+ bnet2.CPD{6} = set_fields(bnet.CPD{6}, 'cov', Sigma);
+
+ if 0
+ % visualize each model by concatenating means for each model for nsteps in a row
+ nsteps = 5;
+ ev = cell(ss, nsteps*prod(Qsizes(2:3)));
+ t = 1;
+ for q2=1:Qsizes(2)
+ for q3=1:Qsizes(3)
+ for i=1:nsteps
+ ev{Onode,t} = mu(:,q2,q3);
+ ev{Q2,t} = q2;
+ t = t + 1;
+ end
+ end
+ end
+ plot_square_hhmm(ev)
+ end
+
+ % bnet3 is the same as the learned model, except we will use it in testing mode
+ if supervised
+ bnet3 = bnet2;
+ bnet3.observed = [Onode];
+ engine3 = hmm_inf_engine(bnet3);
+ %engine3 = jtree_ndx_dbn_inf_engine(bnet3);
+ else
+ bnet3 = bnet2;
+ engine3 = engine2;
+ end
+
+ if 0
+ % segment whole sequence
+ mpe = calc_mpe_dbn(engine3, long_seq);
+ pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, []);
+ end
+
+ % segment each sequence
+ test_cases = cases(3:4);
+ for i=1:2
+ ev = test_cases{i};
+ T = size(ev, 2);
+ for t=1:T
+ ev{Q1,t} = [];
+ ev{Q2,t} = [];
+ end
+ mpe = calc_mpe_dbn(engine3, ev);
+ subplot(1,2,i)
+ plot_square_hhmm(mpe)
+ %pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, []);
+ q1s = cell2num(mpe(Q1,:));
+ h = hist(q1s, 1:Qsizes(1));
+ map_q1 = argmax(h);
+ str = sprintf('test seq %d is of type %d\n', i, map_q1);
+ title(str)
+ end
+
+end
+
+if 0
+% Estimate gotten by counting transitions in the labelled data
+% Note that a self transition shouldn't count if F2=off.
+Q2ev = cell2num(ev(Q2,:));
+Q2a = Q2ev(1:end-1);
+Q2b = Q2ev(2:end);
+counts = compute_counts([Q2a; Q2b], [4 4]);
+end
+
+eclass = bnet2.equiv_class;
+CPDQ1=struct(bnet2.CPD{eclass(Q1,2)});
+CPDQ2=struct(bnet2.CPD{eclass(Q2,2)});
+CPDQ3=struct(bnet2.CPD{eclass(Q3,2)});
+CPDF2=struct(bnet2.CPD{eclass(F2,1)});
+CPDF3=struct(bnet2.CPD{eclass(F3,1)});
+
+
+A=add_hhmm_end_state(CPDQ2.transprob, CPDF2.termprob(:,:,2));
+squeeze(A(:,1,:))
+squeeze(A(:,2,:))
+CPDQ2.startprob
+
+if 0
+S=struct(CPDF2.sub_CPD_term);
+S.nsamples
+reshape(S.counts, [2 4 2])
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/mk_square_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/mk_square_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,183 @@
+function bnet = mk_square_hhmm(discrete_obs, true_params, topright)
+
+% Make a 3 level HHMM described by the following grammar
+%
+% Square -> CLK | CCK % clockwise or counterclockwise
+% CLK -> LR UD RL DU start on top left (1 2 3 4)
+% CCK -> RL UD LR DU if start at top right (3 2 1 4)
+% CCK -> UD LR DU RL if start at top left (2 1 4 3)
+%
+% LR = left-right, UD = up-down, RL = right-left, DU = down-up
+% LR, UD, RL, DU are sub HMMs.
+%
+% For discrete observations, the subHMMs are 2-state left-right.
+% LR emits L then l, etc.
+%
+% For cts observations, the subHMMs are 1 state.
+% LR emits a vector in the -> direction, with a little noise.
+% Since there is no constraint that we remain in the LR state as long as the RL state,
+% the sides of the square might have different lengths,
+% so the result is not really a square!
+%
+% If true_params = 0, we use random parameters at the top 2 levels
+% (ready for learning). At the bottom level, we use noisy versions
+% of the "true" observations.
+%
+% If topright=1, counter-clockwise starts at top right, not top left
+% This example was inspired by Ivanov and Bobick.
+
+if nargin < 3, topright = 1; end
+
+if 1 % discrete_obs
+ Qsizes = [2 4 2];
+else
+ Qsizes = [2 4 1];
+end
+
+D = 3;
+Qnodes = 1:D;
+startprob = cell(1,D);
+transprob = cell(1,D);
+termprob = cell(1,D);
+
+% LEVEL 1
+
+startprob{1} = 'unif';
+transprob{1} = 'unif';
+
+% LEVEL 2
+
+if true_params
+ startprob{2} = zeros(2, 4);
+ startprob{2}(1, :) = [1 0 0 0];
+ if topright
+ startprob{2}(2, :) = [0 0 1 0];
+ else
+ startprob{2}(2, :) = [0 1 0 0];
+ end
+
+ transprob{2} = zeros(4, 2, 4);
+
+ transprob{2}(:,1,:) = [0 1 0 0
+ 0 0 1 0
+ 0 0 0 1
+ 0 0 0 1]; % 4->e
+ if topright
+ transprob{2}(:,2,:) = [0 0 0 1
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 1]; % 4->e
+ else
+ transprob{2}(:,2,:) = [0 0 0 1
+ 1 0 0 0
+ 0 0 1 0 % 3->e
+ 0 0 1 0];
+ end
+
+ %termprob{2} = 'rightstop';
+ termprob{2} = zeros(2,4,2);
+ pfin = 0.8;
+ termprob{2}(1,:,2) = [0 0 0 pfin]; % finish in state 4 (DU)
+ termprob{2}(1,:,1) = 1 - [0 0 0 pfin];
+ if topright
+ termprob{2}(2,:,2) = [0 0 0 pfin];
+ termprob{2}(2,:,1) = 1 - [0 0 0 pfin];
+ else
+ termprob{2}(2,:,2) = [0 0 pfin 0]; % finish in state 3 (RL)
+ termprob{2}(2,:,1) = 1 - [0 0 pfin 0];
+ end
+else
+ % In the unsupervised case, it is essential that we break symmetry
+ % in the initial param estimates.
+ %startprob{2} = 'unif';
+ %transprob{2} = 'unif';
+ %termprob{2} = 'unif';
+ startprob{2} = 'rnd';
+ transprob{2} = 'rnd';
+ termprob{2} = 'rnd';
+end
+
+% LEVEL 3
+
+if 1 | true_params
+ startprob{3} = 'leftstart';
+ transprob{3} = 'leftright';
+ termprob{3} = 'rightstop';
+else
+ % If we want to be able to run a base-level model backwards...
+ startprob{3} = 'rnd';
+ transprob{3} = 'rnd';
+ termprob{3} = 'rnd';
+end
+
+
+% OBS LEVEL
+
+if discrete_obs
+ % Initialise observations of lowest level primitives in a way which we can interpret
+ chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+ L=find(chars=='L'); l=find(chars=='l');
+ U=find(chars=='U'); u=find(chars=='u');
+ R=find(chars=='R'); r=find(chars=='r');
+ D=find(chars=='D'); d=find(chars=='d');
+ Osize = length(chars);
+
+ if true_params
+ p = 1; % makes each state fully observed
+ else
+ p = 0.9;
+ end
+
+ obsprob = (1-p)*ones([4 2 Osize]);
+ % Q2 Q3 O
+ obsprob(1, 1, L) = p;
+ obsprob(1, 2, l) = p;
+ obsprob(2, 1, U) = p;
+ obsprob(2, 2, u) = p;
+ obsprob(3, 1, R) = p;
+ obsprob(3, 2, r) = p;
+ obsprob(4, 1, D) = p;
+ obsprob(4, 2, d) = p;
+ obsprob = mk_stochastic(obsprob);
+ Oargs = {'CPT', obsprob};
+else
+ % Initialise means of lowest level primitives in a way which we can interpret
+ % These means are little vectors in the east, south, west, north directions.
+ % (left-right=east, up-down=south, right-left=west, down-up=north)
+ Osize = 2;
+ mu = zeros(2, Qsizes(2), Qsizes(3));
+ scale = 3;
+ if true_params
+ noise = 0;
+ else
+ noise = 0.5*scale;
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 1, q3) = scale*[1;0] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 2, q3) = scale*[0;-1] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 3, q3) = scale*[-1;0] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 4, q3) = scale*[0;1] + noise*rand(2,1);
+ end
+ Sigma = repmat(reshape(scale*eye(2), [2 2 1 1 ]), [1 1 Qsizes(2) Qsizes(3)]);
+ Oargs = {'mean', mu, 'cov', Sigma, 'cov_type', 'diag'};
+end
+
+if discrete_obs
+ selfprob = 0.5;
+else
+ selfprob = 0.95;
+ % If less than this, it won't look like a square
+ % because it doesn't spend enough time in each state
+ % Unfortunately, the variance on durations (lengths of each side)
+ % is very large
+end
+bnet = mk_hhmm('Qsizes', Qsizes, 'Osize', Osize', 'discrete_obs', discrete_obs, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(2:3), 'selfprob', selfprob, ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/plot_square_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/plot_square_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function plot_square_hhmm(ev)
+% Plot the square shape implicit in the evidence.
+% ev{i,t} is the value of node i in slice t.
+% The observed node contains a velocity (delta increment), which is converted
+% into a position.
+% The Q2 node specifies which model is used; each segment is color-coded
+% in the order red, green, blue, black.
+
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+
+delta = cell2num(ev(Onode,:)); % delta(:,t)
+Q2label = cell2num(ev(Q2,:));
+
+T = size(delta, 2);
+pos = zeros(2,T+1);
+clf
+hold on
+cols = {'r', 'g', 'b', 'k'};
+boundary = 0;
+coli = 1;
+for t=2:T+1
+ pos(:,t) = pos(:,t-1) + delta(:,t-1);
+ plot(pos(1,t), pos(2,t), sprintf('%c.', cols{coli}));
+ if t < T
+ boundary = (Q2label(t) ~= Q2label(t-1));
+ end
+ if boundary
+ coli = coli + 1;
+ coli = mod(coli-1, length(cols)) + 1;
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/sample_square_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/Old/sample_square_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,160 @@
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+discrete_obs = 1;
+topright = 0;
+
+Qsizes = [2 4 2];
+D = 3;
+Qnodes = 1:D;
+startprob = cell(1,D);
+transprob = cell(1,D);
+termprob = cell(1,D);
+
+% LEVEL 1
+
+startprob{1} = 'ergodic';
+transprob{1} = 'ergodic';
+
+% LEVEL 2
+
+startprob{2} = zeros(2, 4);
+startprob{2}(1, :) = [1 0 0 0];
+if topright
+ startprob{2}(2, :) = [0 0 1 0];
+else
+ startprob{2}(2, :) = [0 1 0 0];
+end
+
+transprob{2} = zeros(4, 2, 4);
+
+transprob{2}(:,1,:) = [0 1 0 0
+ 0 0 1 0
+ 0 0 0 1
+ 0 0 0 1]; % 4->e
+if topright
+ transprob{2}(:,2,:) = [0 0 0 1
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 1]; % 4->e
+else
+ transprob{2}(:,2,:) = [0 0 0 1
+ 1 0 0 0
+ 0 0 1 0 % 3->e
+ 0 0 1 0];
+end
+
+%termprob{2} = 'rightstop';
+termprob{2} = zeros(2,4,2);
+pfin = 0.8;
+termprob{2}(1,:,2) = [0 0 0 pfin]; % finish in state 4 (DU)
+termprob{2}(1,:,1) = 1 - [0 0 0 pfin];
+if topright
+ termprob{2}(2,:,2) = [0 0 0 pfin];
+ termprob{2}(2,:,1) = 1 - [0 0 0 pfin];
+else
+ termprob{2}(2,:,2) = [0 0 pfin 0]; % finish in state 3 (RL)
+ termprob{2}(2,:,1) = 1 - [0 0 pfin 0];
+end
+
+% LEVEL 3
+
+startprob{3} = 'leftstart';
+transprob{3} = 'leftright';
+termprob{3} = 'rightstop';
+
+
+% OBS LEVEL
+
+if discrete_obs
+ chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+ L=find(chars=='L'); l=find(chars=='l');
+ U=find(chars=='U'); u=find(chars=='u');
+ R=find(chars=='R'); r=find(chars=='r');
+ D=find(chars=='D'); d=find(chars=='d');
+ Osize = length(chars);
+
+ obsprob = zeros([4 2 Osize]);
+ % Q2 Q3 O
+ obsprob(1, 1, L) = 1.0;
+ obsprob(1, 2, l) = 1.0;
+ obsprob(2, 1, U) = 1.0;
+ obsprob(2, 2, u) = 1.0;
+ obsprob(3, 1, R) = 1.0;
+ obsprob(3, 2, r) = 1.0;
+ obsprob(4, 1, D) = 1.0;
+ obsprob(4, 2, d) = 1.0;
+
+ Oargs = {'CPT', obsprob};
+else
+ Osize = 2;
+ mu = zeros(2, 4, 2);
+ noise = 0;
+ scale = 10;
+ for q3=1:2
+ mu(:, 1, q3) = scale*[1;0] + noise*rand(2,1);
+ end
+ for q3=1:2
+ mu(:, 2, q3) = scale*[0;-1] + noise*rand(2,1);
+ end
+ for q3=1:2
+ mu(:, 3, q3) = scale*[-1;0] + noise*rand(2,1);
+ end
+ for q3=1:2
+ mu(:, 4, q3) = scale*[0;1] + noise*rand(2,1);
+ end
+ Sigma = repmat(reshape(0.01*eye(2), [2 2 1 1 ]), [1 1 4 2]);
+ Oargs = {'mean', mu, 'cov', Sigma};
+end
+
+bnet = mk_hhmm('Qsizes', Qsizes, 'Osize', Osize', 'discrete_obs', discrete_obs, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(2:3), ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
+if discrete_obs
+ Tmax = 30;
+else
+ Tmax = 200;
+end
+usecell = ~discrete_obs;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+
+for seqi=1:3
+ evidence = sample_dbn(bnet, Tmax, usecell, 'stop_sampling_F2');
+ T = size(evidence, 2)
+ if discrete_obs
+ pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, Onode, chars);
+ else
+ pos = zeros(2,T+1);
+ delta = cell2num(evidence(Onode,:));
+ clf
+ hold on
+ cols = {'r', 'g', 'k', 'b'};
+ boundary = cell2num(evidence(F3,:))-1;
+ coli = 1;
+ for t=2:T+1
+ pos(:,t) = pos(:,t-1) + delta(:,t-1);
+ plot(pos(1,t), pos(2,t), sprintf('%c.', cols{coli}));
+ if boundary(t-1)
+ coli = coli + 1;
+ coli = mod(coli-1, length(cols)) + 1;
+ end
+ end
+ %plot(pos(1,:), pos(2,:), '.')
+ %pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, Onode, []);
+ pause
+ end
+end
+
+eclass = bnet.equiv_class;
+S=struct(bnet.CPD{eclass(Q2,2)});
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/get_square_data.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/get_square_data.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+% Let the user draw a square with the mouse,
+% and then click on the corners to do a manual segmentation
+
+ss = 6;
+Q1 = 1; Q2 = 2; Q3 = 3; obsvel = 6;
+CLOCKWISE = 1; ANTICLOCK = 2;
+LR = 1; UD = 2; RL = 3; DU = 4;
+
+% repeat this block manually incrementing the sequence number
+% and setting ori.
+% (since I don't know how to call getmouse as a call-return function).
+seq = 4;
+%ori = CLOCKWISE
+ori = ANTICLOCK;
+clear xpos ypos
+getmouse
+% end block
+
+% manual segmentation with the mouse
+startseg(1) = 1;
+for i=2:4
+ fprintf('click on start of segment %d\n', i);
+ [x,y] = ginput(1);
+ plot(x,y,'ro')
+ d = dist2([xpos; ypos]', [x y]);
+ startseg(i) = argmin(d);
+end
+
+% plot corners in green
+%ti = first point in (i+1)st segment
+t1 = startseg(1); t2 = startseg(2); t3 = startseg(3); t4 = startseg(4);
+plot(xpos(t2), ypos(t2), 'g*')
+plot(xpos(t3), ypos(t3), 'g*')
+plot(xpos(t4), ypos(t4), 'g*')
+
+
+xvel = xpos(2:end) - xpos(1:end-1);
+yvel = ypos(2:end) - ypos(1:end-1);
+speed = [xvel(:)'; yvel(:)'];
+pos_data{seq} = [xpos(:)'; ypos(:)'];
+vel_data{seq} = [xvel(:)'; yvel(:)'];
+T = length(xvel);
+Q1label{seq} = num2cell(repmat(ori, 1, T));
+Q2label{seq} = zeros(1, T);
+if ori == CLOCKWISE
+ Q2label{seq}(t1:t2) = LR;
+ Q2label{seq}(t2+1:t3) = UD;
+ Q2label{seq}(t3+1:t4) = RL;
+ Q2label{seq}(t4+1:T) = DU;
+else
+ Q2label{seq}(t1:t2) = RL;
+ Q2label{seq}(t2+1:t3) = UD;
+ Q2label{seq}(t3+1:t4) = LR;
+ Q2label{seq}(t4+1:T) = DU;
+end
+
+% pos_data{seq}(:,t), vel_data{seq}(:,t) Q1label{seq}(t) Q2label{seq}(t)
+save 'square4' pos_data vel_data Q1label Q2label
+
+nseq = 4;
+cases = cell(1,nseq);
+for seq=1:nseq
+ T = size(vel_data{seq},2);
+ ev = cell(ss,T);
+ ev(obsvel,:) = num2cell(vel_data{seq},1);
+ ev(Q1,:) = Q1label{seq};
+ ev(Q2,:) = num2cell(Q2label{seq});
+ cases{seq} = ev;
+end
+save 'square4_cases' cases
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/hhmm_inference.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/hhmm_inference.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+bnet = mk_square_hhmm(1, 1);
+
+engine = {};
+engine{end+1} = hmm_inf_engine(bnet);
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+
+exact = 1:length(engine);
+filter = 0;
+single = 0;
+maximize = 0;
+T = 4;
+
+[err, inf_time, engine] = cmp_inference(bnet, engine, exact, T, filter, single, maximize);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/is_F2_true_D3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/is_F2_true_D3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function stop = is_F2_true_D3(vals)
+% function stop = is_F2_true_D3(vals)
+%
+% If vals(F2)=2 then level 2 has finished, so we return stop=1
+% to stop sample_dbn. Otherwise we return stop=0.
+% We assume this is for a D=3 level HHMM.
+
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+stop = 0;
+if (iscell(vals) & vals{F2}==2) | (~iscell(vals) & vals(F2)==2)
+ stop = 1;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,152 @@
+% Try to learn a 3 level HHMM similar to mk_square_hhmm
+% from hand-drawn squares.
+
+% Because startprob should be shared for t=1:T,
+% but in the DBN is shared for t=2:T, we train using a single long sequence.
+
+discrete_obs = 0;
+supervised = 1;
+obs_finalF2 = 0;
+% It is not possible to observe F2 if we learn
+% because the update_ess method for hhmmF_CPD and hhmmQ_CPD assume
+% the F nodes are always hidden (for speed).
+% However, for generating, we might want to set the final F2=true
+% to force all subroutines to finish.
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+bnet = mk_square_hhmm(discrete_obs, 0);
+
+ss = 6;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+Qsizes = [2 4 1];
+
+if supervised
+ bnet.observed = [Q1 Q2 Onode];
+else
+ bnet.observed = [Onode];
+end
+
+if obs_finalF2
+ engine = jtree_dbn_inf_engine(bnet);
+ % can't use ndx version because sometimes F2 is hidden, sometimes observed
+ error('can''t observe F when learning')
+else
+ if supervised
+ engine = jtree_ndx_dbn_inf_engine(bnet);
+ else
+ engine = jtree_hmm_inf_engine(bnet);
+ end
+end
+
+load 'square4_cases' % cases{seq}{i,t} for i=1:ss
+%plot_square_hhmm(cases{1})
+%long_seq = cat(2, cases{:});
+train_cases = cases(1:2);
+long_seq = cat(2, train_cases{:});
+if ~supervised
+ T = size(long_seq,2);
+ for t=1:T
+ long_seq{Q1,t} = [];
+ long_seq{Q2,t} = [];
+ end
+end
+[bnet2, LL, engine2] = learn_params_dbn_em(engine, {long_seq}, 'max_iter', 2);
+
+eclass = bnet2.equiv_class;
+CPDO=struct(bnet2.CPD{eclass(Onode,1)});
+mu = CPDO.mean;
+Sigma = CPDO.cov;
+CPDO_full = CPDO;
+
+% force diagonal covs after training
+for k=1:size(Sigma,3)
+ Sigma(:,:,k) = diag(diag(Sigma(:,:,k)));
+end
+bnet2.CPD{6} = set_fields(bnet.CPD{6}, 'cov', Sigma);
+
+if 0
+ % visualize each model by concatenating means for each model for nsteps in a row
+ nsteps = 5;
+ ev = cell(ss, nsteps*prod(Qsizes(2:3)));
+ t = 1;
+ for q2=1:Qsizes(2)
+ for q3=1:Qsizes(3)
+ for i=1:nsteps
+ ev{Onode,t} = mu(:,q2,q3);
+ ev{Q2,t} = q2;
+ t = t + 1;
+ end
+ end
+ end
+ plot_square_hhmm(ev)
+end
+
+% bnet3 is the same as the learned model, except we will use it in testing mode
+if supervised
+ bnet3 = bnet2;
+ bnet3.observed = [Onode];
+ engine3 = hmm_inf_engine(bnet3);
+ %engine3 = jtree_ndx_dbn_inf_engine(bnet3);
+else
+ bnet3 = bnet2;
+ engine3 = engine2;
+end
+
+if 0
+ % segment whole sequence
+ mpe = calc_mpe_dbn(engine3, long_seq);
+ pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, []);
+end
+
+% segment each sequence
+test_cases = cases(3:4);
+for i=1:2
+ ev = test_cases{i};
+ T = size(ev, 2);
+ for t=1:T
+ ev{Q1,t} = [];
+ ev{Q2,t} = [];
+ end
+ %mpe = calc_mpe_dbn(engine3, ev);
+ mpe = find_mpe(engine3, ev)
+ subplot(1,2,i)
+ plot_square_hhmm(mpe)
+ %pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, []);
+ q1s = cell2num(mpe(Q1,:));
+ h = hist(q1s, 1:Qsizes(1));
+ map_q1 = argmax(h);
+ str = sprintf('test seq %d is of type %d\n', i, map_q1);
+ title(str)
+end
+
+
+if 0
+% Estimate gotten by counting transitions in the labelled data
+% Note that a self transition shouldn't count if F2=off.
+Q2ev = cell2num(ev(Q2,:));
+Q2a = Q2ev(1:end-1);
+Q2b = Q2ev(2:end);
+counts = compute_counts([Q2a; Q2b], [4 4]);
+end
+
+eclass = bnet2.equiv_class;
+CPDQ1=struct(bnet2.CPD{eclass(Q1,2)});
+CPDQ2=struct(bnet2.CPD{eclass(Q2,2)});
+CPDQ3=struct(bnet2.CPD{eclass(Q3,2)});
+CPDF2=struct(bnet2.CPD{eclass(F2,1)});
+CPDF3=struct(bnet2.CPD{eclass(F3,1)});
+
+
+A=add_hhmm_end_state(CPDQ2.transprob, CPDF2.termprob(:,:,2));
+squeeze(A(:,1,:));
+CPDQ2.startprob;
+
+if 0
+S=struct(CPDF2.sub_CPD_term);
+S.nsamples
+reshape(S.counts, [2 4 2])
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/learn_square_hhmm_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/learn_square_hhmm_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,171 @@
+% Try to learn a 3 level HHMM similar to mk_square_hhmm
+% from synthetic discrete sequences
+
+
+discrete_obs = 1;
+supervised = 0;
+obs_finalF2 = 0;
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+bnet_init = mk_square_hhmm(discrete_obs, 0);
+
+ss = 6;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+
+if supervised
+ bnet_init.observed = [Q1 Q2 Onode];
+else
+ bnet_init.observed = [Onode];
+end
+
+if obs_finalF2
+ engine_init = jtree_dbn_inf_engine(bnet_init);
+ % can't use ndx version because sometimes F2 is hidden, sometimes observed
+ error('can''t observe F when learning')
+ % It is not possible to observe F2 if we learn
+ % because the update_ess method for hhmmF_CPD and hhmmQ_CPD assume
+ % the F nodes are always hidden (for speed).
+ % However, for generating, we might want to set the final F2=true
+ % to force all subroutines to finish.
+else
+ if supervised
+ engine_init = jtree_ndx_dbn_inf_engine(bnet_init);
+ else
+ engine_init = hmm_inf_engine(bnet_init);
+ end
+end
+
+% generate some synthetic data (easier to debug)
+chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+L=find(chars=='L'); l=find(chars=='l');
+U=find(chars=='U'); u=find(chars=='u');
+R=find(chars=='R'); r=find(chars=='r');
+D=find(chars=='D'); d=find(chars=='d');
+
+cases = {};
+
+T = 8;
+ev = cell(ss, T);
+ev(Onode,:) = num2cell([L l U u R r D d]);
+if supervised
+ ev(Q1,:) = num2cell(1*ones(1,T));
+ ev(Q2,:) = num2cell( [1 1 2 2 3 3 4 4]);
+end
+cases{1} = ev;
+cases{3} = ev;
+
+T = 8;
+ev = cell(ss, T);
+%we start with R then r, even though we are running the model 'backwards'!
+ev(Onode,:) = num2cell([R r U u L l D d]);
+
+if supervised
+ ev(Q1,:) = num2cell(2*ones(1,T));
+ ev(Q2,:) = num2cell( [3 3 2 2 1 1 4 4]);
+end
+
+cases{2} = ev;
+cases{4} = ev;
+
+if obs_finalF2
+ for i=1:length(cases)
+ T = size(cases{i},2);
+ cases{i}(F2,T)={2}; % force F2 to be finished at end of seq
+ end
+end
+
+
+% startprob should be shared for t=1:T,
+% but in the DBN it is shared for t=2:T,
+% so we train using a single long sequence.
+long_seq = cat(2, cases{:});
+[bnet_learned, LL, engine_learned] = ...
+ learn_params_dbn_em(engine_init, {long_seq}, 'max_iter', 200);
+
+% figure out which subsequence each model is responsible for
+mpe = calc_mpe_dbn(engine_learned, long_seq);
+pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, chars);
+
+
+% The "true" segmentation of the training sequence is
+% Q1: 1 2
+% O: L l U u R r D d | R r U u L l D d | etc.
+%
+% When we learn in a supervised fashion, we recover the "truth".
+
+% When we learn in an unsupervised fashion with seed=1, we get
+% Q1: 2 1
+% O: L l U u R r D d R r | U u L l D d | etc.
+%
+% This means for model 1:
+% starts in state 2
+% transitions 2->1, 1->4, 4->e, 3->2
+%
+% For model 2,
+% starts in state 1
+% transitions 1->2, 2->3, 3->4 or e, 4->3
+
+% examine the params
+eclass = bnet_learned.equiv_class;
+CPDQ1=struct(bnet_learned.CPD{eclass(Q1,2)});
+CPDQ2=struct(bnet_learned.CPD{eclass(Q2,2)});
+CPDQ3=struct(bnet_learned.CPD{eclass(Q3,2)});
+CPDF2=struct(bnet_learned.CPD{eclass(F2,1)});
+CPDF3=struct(bnet_learned.CPD{eclass(F3,1)});
+CPDO=struct(bnet_learned.CPD{eclass(Onode,1)});
+
+A_learned =add_hhmm_end_state(CPDQ2.transprob, CPDF2.termprob(:,:,2));
+squeeze(A_learned(:,1,:))
+squeeze(A_learned(:,2,:))
+
+
+% Does the "true" model have higher likelihood than the learned one?
+% i.e., Does the unsupervised method learn the wrong model because
+% we have the wrong cost fn, or because of local minima?
+
+bnet_true = mk_square_hhmm(discrete_obs,1);
+
+% examine the params
+eclass = bnet_learned.equiv_class;
+CPDQ1_true=struct(bnet_true.CPD{eclass(Q1,2)});
+CPDQ2_true=struct(bnet_true.CPD{eclass(Q2,2)});
+CPDQ3_true=struct(bnet_true.CPD{eclass(Q3,2)});
+CPDF2_true=struct(bnet_true.CPD{eclass(F2,1)});
+CPDF3_true=struct(bnet_true.CPD{eclass(F3,1)});
+
+A_true =add_hhmm_end_state(CPDQ2_true.transprob, CPDF2_true.termprob(:,:,2));
+squeeze(A_true(:,1,:))
+
+
+if supervised
+ engine_true = jtree_ndx_dbn_inf_engine(bnet_true);
+else
+ engine_true = hmm_inf_engine(bnet_true);
+end
+
+%[engine_learned, ll_learned] = enter_evidence(engine_learned, long_seq);
+%[engine_true, ll_true] = enter_evidence(engine_true, long_seq);
+[engine_learned, ll_learned] = enter_evidence(engine_learned, cases{2});
+[engine_true, ll_true] = enter_evidence(engine_true, cases{2});
+ll_learned
+ll_true
+
+
+% remove concatenation artefacts
+ll_learned = 0;
+ll_true = 0;
+for m=1:length(cases)
+ [engine_learned, ll_learned_tmp] = enter_evidence(engine_learned, cases{m});
+ [engine_true, ll_true_tmp] = enter_evidence(engine_true, cases{m});
+ ll_learned = ll_learned + ll_learned_tmp;
+ ll_true = ll_true + ll_true_tmp;
+end
+ll_learned
+ll_true
+
+% In both cases, ll_learned >> ll_true
+% which shows we are using the wrong cost function!
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/mk_square_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/mk_square_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,180 @@
+function bnet = mk_square_hhmm(discrete_obs, true_params, topright)
+
+% Make a 3 level HHMM described by the following grammar
+%
+% Square -> CLK | CCK % clockwise or counterclockwise
+% CLK -> LR UD RL DU start on top left (1 2 3 4)
+% CCK -> RL UD LR DU if start at top right (3 2 1 4)
+% CCK -> UD LR DU RL if start at top left (2 1 4 3)
+%
+% LR = left-right, UD = up-down, RL = right-left, DU = down-up
+% LR, UD, RL, DU are sub HMMs.
+%
+% For discrete observations, the subHMMs are 2-state left-right.
+% LR emits L then l, etc.
+%
+% For cts observations, the subHMMs are 1 state.
+% LR emits a vector in the -> direction, with a little noise.
+% Since there is no constraint that we remain in the LR state as long as the RL state,
+% the sides of the square might have different lengths,
+% so the result is not really a square!
+%
+% If true_params = 0, we use random parameters at the top 2 levels
+% (ready for learning). At the bottom level, we use noisy versions
+% of the "true" observations.
+%
+% If topright=1, counter-clockwise starts at top right, not top left
+% This example was inspired by Ivanov and Bobick.
+
+if nargin < 3, topright = 1; end
+
+if 1 % discrete_obs
+ Qsizes = [2 4 2];
+else
+ Qsizes = [2 4 1];
+end
+
+D = 3;
+Qnodes = 1:D;
+startprob = cell(1,D);
+transprob = cell(1,D);
+termprob = cell(1,D);
+
+% LEVEL 1
+
+startprob{1} = 'unif';
+transprob{1} = 'unif';
+
+% LEVEL 2
+
+if true_params
+ startprob{2} = zeros(2, 4);
+ startprob{2}(1, :) = [1 0 0 0];
+ if topright
+ startprob{2}(2, :) = [0 0 1 0];
+ else
+ startprob{2}(2, :) = [0 1 0 0];
+ end
+
+ transprob{2} = zeros(4, 2, 4);
+
+ transprob{2}(:,1,:) = [0 1 0 0
+ 0 0 1 0
+ 0 0 0 1
+ 0 0 0 1]; % 4->e
+ if topright
+ transprob{2}(:,2,:) = [0 0 0 1
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 1]; % 4->e
+ else
+ transprob{2}(:,2,:) = [0 0 0 1
+ 1 0 0 0
+ 0 0 1 0 % 3->e
+ 0 0 1 0];
+ end
+
+ %termprob{2} = 'rightstop';
+ termprob{2} = zeros(2,4);
+ pfin = 0.8;
+ termprob{2}(1,:) = [0 0 0 pfin]; % finish in state 4 (DU)
+ if topright
+ termprob{2}(2,:) = [0 0 0 pfin];
+ else
+ termprob{2}(2,:) = [0 0 pfin 0]; % finish in state 3 (RL)
+ end
+else
+ % In the unsupervised case, it is essential that we break symmetry
+ % in the initial param estimates.
+ %startprob{2} = 'unif';
+ %transprob{2} = 'unif';
+ %termprob{2} = 'unif';
+ startprob{2} = 'rnd';
+ transprob{2} = 'rnd';
+ termprob{2} = 'rnd';
+end
+
+% LEVEL 3
+
+if 1 | true_params
+ startprob{3} = 'leftstart';
+ transprob{3} = 'leftright';
+ termprob{3} = 'rightstop';
+else
+ % If we want to be able to run a base-level model backwards...
+ startprob{3} = 'rnd';
+ transprob{3} = 'rnd';
+ termprob{3} = 'rnd';
+end
+
+
+% OBS LEVEL
+
+if discrete_obs
+ % Initialise observations of lowest level primitives in a way which we can interpret
+ chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+ L=find(chars=='L'); l=find(chars=='l');
+ U=find(chars=='U'); u=find(chars=='u');
+ R=find(chars=='R'); r=find(chars=='r');
+ D=find(chars=='D'); d=find(chars=='d');
+ Osize = length(chars);
+
+ if true_params
+ p = 1; % makes each state fully observed
+ else
+ p = 0.9;
+ end
+
+ obsprob = (1-p)*ones([4 2 Osize]);
+ % Q2 Q3 O
+ obsprob(1, 1, L) = p;
+ obsprob(1, 2, l) = p;
+ obsprob(2, 1, U) = p;
+ obsprob(2, 2, u) = p;
+ obsprob(3, 1, R) = p;
+ obsprob(3, 2, r) = p;
+ obsprob(4, 1, D) = p;
+ obsprob(4, 2, d) = p;
+ obsprob = mk_stochastic(obsprob);
+ Oargs = {'CPT', obsprob};
+else
+ % Initialise means of lowest level primitives in a way which we can interpret
+ % These means are little vectors in the east, south, west, north directions.
+ % (left-right=east, up-down=south, right-left=west, down-up=north)
+ Osize = 2;
+ mu = zeros(2, Qsizes(2), Qsizes(3));
+ scale = 3;
+ if true_params
+ noise = 0;
+ else
+ noise = 0.5*scale;
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 1, q3) = scale*[1;0] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 2, q3) = scale*[0;-1] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 3, q3) = scale*[-1;0] + noise*rand(2,1);
+ end
+ for q3=1:Qsizes(3)
+ mu(:, 4, q3) = scale*[0;1] + noise*rand(2,1);
+ end
+ Sigma = repmat(reshape(scale*eye(2), [2 2 1 1 ]), [1 1 Qsizes(2) Qsizes(3)]);
+ Oargs = {'mean', mu, 'cov', Sigma, 'cov_type', 'diag'};
+end
+
+if discrete_obs
+ selfprob = 0.5;
+else
+ selfprob = 0.95;
+ % If less than this, it won't look like a square
+ % because it doesn't spend enough time in each state
+ % Unfortunately, the variance on durations (lengths of each side)
+ % is very large
+end
+bnet = mk_hhmm('Qsizes', Qsizes, 'Osize', Osize', 'discrete_obs', discrete_obs, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(2:3), 'selfprob', selfprob, ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/plot_square_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/plot_square_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function plot_square_hhmm(ev)
+% Plot the square shape implicit in the evidence.
+% ev{i,t} is the value of node i in slice t.
+% The observed node contains a velocity (delta increment), which is converted
+% into a position.
+% The Q2 node specifies which model is used, and hence which color
+% to use: 1=red, 2=green, 3=blue, 4=black.
+
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+
+delta = cell2num(ev(Onode,:)); % delta(:,t)
+Q2label = cell2num(ev(Q2,:));
+
+T = size(delta, 2);
+pos = zeros(2,T+1);
+hold on
+cols = {'r', 'g', 'b', 'k'};
+for t=2:T+1
+ pos(:,t) = pos(:,t-1) + delta(:,t-1);
+ plot(pos(1,t), pos(2,t), sprintf('%c.', cols{Q2label(t-1)}));
+ if (t==2)
+ text(pos(1,t-1),pos(2,t-1),sprintf('%d',t))
+ elseif (mod(t,20)==0)
+ text(pos(1,t),pos(2,t),sprintf('%d',t))
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/sample_square_hhmm_cts.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/sample_square_hhmm_cts.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+% Generate samples from the HHMM with the true params.
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+discrete_obs = 0;
+
+bnet = mk_square_hhmm(discrete_obs, 1);
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+
+for seqi=1:1
+ evidence = sample_dbn(bnet, 'stop_test', 'is_F2_true_D3');
+ clf
+ plot_square_hhmm(evidence);
+ %pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, Onode, []);
+ fprintf('sequence %d has length %d; press key to continue\n', seqi, size(evidence,2))
+ pause
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/sample_square_hhmm_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/sample_square_hhmm_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+% Generate samples from the HHMM with the true params.
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+discrete_obs = 1;
+
+bnet = mk_square_hhmm(discrete_obs, 1);
+
+Tmax = 30;
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+chars = ['L', 'l', 'U', 'u', 'R', 'r', 'D', 'd'];
+
+for seqi=1:3
+ evidence = cell2num(sample_dbn(bnet, 'stop_test', 'is_F2_true_D3'));
+ T = size(evidence, 2)
+ pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, Onode, chars);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/square4.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/square4.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/square4_cases.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/square4_cases.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/test_square_fig.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/test_square_fig.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1310 @@
+function fig = test_square_fig()
+% This is the machine-generated representation of a Handle Graphics object
+% and its children. Note that handle values may change when these objects
+% are re-created. This may cause problems with any callbacks written to
+% depend on the value of the handle at the time the object was saved.
+%
+% To reopen this object, just type the name of the M-file at the MATLAB
+% prompt. The M-file and its associated MAT-file must be on your path.
+
+load test_square_fig
+
+h0 = figure('Color',[0.8 0.8 0.8], ...
+ 'Colormap',mat0, ...
+ 'PointerShapeCData',mat1, ...
+ 'Position',[540 374 476 292]);
+h1 = axes('Parent',h0, ...
+ 'CameraUpVector',[0 1 0], ...
+ 'Color',[1 1 1], ...
+ 'ColorOrder',mat2, ...
+ 'NextPlot','add', ...
+ 'Position',[0.13 0.11 0.3270231213872832 0.8149999999999998], ...
+ 'XColor',[0 0 0], ...
+ 'XLim',[-10 50], ...
+ 'XLimMode','manual', ...
+ 'YColor',[0 0 0], ...
+ 'YLim',[-60 10], ...
+ 'YLimMode','manual', ...
+ 'ZColor',[0 0 0]);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',0.4608294930875587, ...
+ 'YData',0.2923976608187218);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'String','2');
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',1.152073732718893, ...
+ 'YData',0.2923976608187218);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',2.995391705069125, ...
+ 'YData',0.8771929824561511);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',3.686635944700463, ...
+ 'YData',0.8771929824561511);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',6.451612903225808, ...
+ 'YData',0.8771929824561511);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',9.677419354838712, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',10.36866359447005, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',15.43778801843318, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',17.51152073732719, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',19.81566820276498, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',20.50691244239631, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',23.73271889400922, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',25.57603686635945, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',29.95391705069125, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',31.79723502304147, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',35.02304147465438, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',35.71428571428572, ...
+ 'YData',2.046783625730996);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',38.47926267281106, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',40.3225806451613, ...
+ 'YData',1.461988304093566);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[40.3225806451613 1.461988304093566 0], ...
+ 'String','20');
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',42.62672811059908, ...
+ 'YData',mat3);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',43.31797235023042, ...
+ 'YData',mat4);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',43.31797235023042, ...
+ 'YData',0.8771929824561511);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',43.54838709677419, ...
+ 'YData',0);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',43.77880184331798, ...
+ 'YData',-0.5847953216374293);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',44.47004608294931, ...
+ 'YData',-2.339181286549703);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',44.93087557603687, ...
+ 'YData',-4.385964912280699);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',46.7741935483871, ...
+ 'YData',-9.064327485380119);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.00460829493088, ...
+ 'YData',-10.81871345029239);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.69585253456221, ...
+ 'YData',mat5);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.69585253456221, ...
+ 'YData',-15.20467836257309);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-19.00584795321637);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-19.88304093567251);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-22.51461988304093);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-23.09941520467836);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-26.02339181286549);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-26.31578947368421);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-27.77777777777777);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-28.3625730994152);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.23502304147466, ...
+ 'YData',-30.99415204678362);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[47.23502304147466 -30.99415204678362 0], ...
+ 'String','40');
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.46543778801843, ...
+ 'YData',-31.57894736842105);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.46543778801843, ...
+ 'YData',-33.62573099415204);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.46543778801843, ...
+ 'YData',-34.50292397660818);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.46543778801843, ...
+ 'YData',-37.42690058479531);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.46543778801843, ...
+ 'YData',-38.01169590643274);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.00460829493088, ...
+ 'YData',-42.39766081871344);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.00460829493088, ...
+ 'YData',-42.98245614035087);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',47.00460829493088, ...
+ 'YData',-46.49122807017543);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',46.7741935483871, ...
+ 'YData',-46.78362573099415);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',46.54377880184332, ...
+ 'YData',-49.41520467836257);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',46.54377880184332, ...
+ 'YData',-49.70760233918128);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',45.85253456221199, ...
+ 'YData',-51.46198830409356);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',45.85253456221199, ...
+ 'YData',-51.75438596491227);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',44.93087557603687, ...
+ 'YData',-53.21637426900584);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',44.70046082949308, ...
+ 'YData',-53.21637426900584);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',44.00921658986175, ...
+ 'YData',-54.09356725146198);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',43.77880184331798, ...
+ 'YData',-54.38596491228069);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',41.93548387096774, ...
+ 'YData',-54.97076023391811);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',41.47465437788019, ...
+ 'YData',-55.26315789473683);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',39.1705069124424, ...
+ 'YData',-55.55555555555554);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[39.1705069124424 -55.55555555555554 0], ...
+ 'String','60');
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',38.94009216589862, ...
+ 'YData',-55.84795321637426);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',36.63594470046083, ...
+ 'YData',-55.55555555555554);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',36.17511520737327, ...
+ 'YData',-55.55555555555554);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',32.94930875576037, ...
+ 'YData',-54.97076023391811);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',32.48847926267281, ...
+ 'YData',-54.97076023391811);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',28.11059907834102, ...
+ 'YData',-53.80116959064326);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',27.64976958525346, ...
+ 'YData',-53.50877192982455);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',23.963133640553, ...
+ 'YData',-53.50877192982455);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',23.27188940092166, ...
+ 'YData',-53.50877192982455);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',19.5852534562212, ...
+ 'YData',-54.97076023391811);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',19.12442396313364, ...
+ 'YData',-54.97076023391811);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',mat6, ...
+ 'YData',-56.14035087719297);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',mat7, ...
+ 'YData',-56.14035087719297);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',9.907834101382491, ...
+ 'YData',-57.30994152046782);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',9.447004608294932, ...
+ 'YData',-57.30994152046782);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',6.221198156682029, ...
+ 'YData',-57.30994152046782);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',4.838709677419356, ...
+ 'YData',-56.7251461988304);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',2.764976958525345, ...
+ 'YData',-56.14035087719297);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',2.534562211981569, ...
+ 'YData',-56.14035087719297);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',0.9216589861751174, ...
+ 'YData',-53.80116959064327);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[0.9216589861751174 -53.80116959064327 0], ...
+ 'String','80');
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',0.6912442396313381, ...
+ 'YData',-53.21637426900584);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.152073732718893, ...
+ 'YData',-48.24561403508771);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.152073732718893, ...
+ 'YData',-47.953216374269);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.843317972350228, ...
+ 'YData',-44.73684210526315);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.843317972350228, ...
+ 'YData',-44.44444444444444);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-2.304147465437787, ...
+ 'YData',-39.76608187134502);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-2.764976958525345, ...
+ 'YData',-38.01169590643274);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-3.225806451612904, ...
+ 'YData',-30.99415204678362);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-3.225806451612904, ...
+ 'YData',-29.82456140350877);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-3.225806451612904, ...
+ 'YData',-24.85380116959064);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-3.225806451612904, ...
+ 'YData',-24.26900584795321);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-2.534562211981566, ...
+ 'YData',-17.5438596491228);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-2.304147465437787, ...
+ 'YData',-16.95906432748537);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.612903225806452, ...
+ 'YData',-11.98830409356725);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.612903225806452, ...
+ 'YData',-11.40350877192982);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',mat8, ...
+ 'YData',-8.47953216374269);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',mat9, ...
+ 'YData',-8.187134502923968);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.152073732718893, ...
+ 'YData',-5.263157894736835);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-1.152073732718893, ...
+ 'YData',-4.970760233918128);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.9216589861751139, ...
+ 'YData',-2.923976608187132);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[-0.9216589861751139 -2.923976608187132 0], ...
+ 'String','100');
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.9216589861751139, ...
+ 'YData',-2.631578947368411);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.6912442396313345, ...
+ 'YData',mat10);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.6912442396313345, ...
+ 'YData',-0.8771929824561369);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.6912442396313345, ...
+ 'YData',-0.5847953216374293);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'HandleVisibility','off', ...
+ 'HorizontalAlignment','center', ...
+ 'Position',[19.80645161290322 12.0675105485232 17.32050807568877], ...
+ 'VerticalAlignment','bottom');
+set(get(h2,'Parent'),'Title',h2);
+h1 = axes('Parent',h0, ...
+ 'CameraUpVector',[0 1 0], ...
+ 'Color',[1 1 1], ...
+ 'ColorOrder',mat11, ...
+ 'NextPlot','add', ...
+ 'Position',[0.5779768786127169 0.11 0.3270231213872832 0.8149999999999998], ...
+ 'XColor',[0 0 0], ...
+ 'YColor',[0 0 0], ...
+ 'ZColor',[0 0 0]);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.4608294930875587, ...
+ 'YData',0);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'String','2');
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-2.764976958525345, ...
+ 'YData',-0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-3.456221198156683, ...
+ 'YData',-0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-7.834101382488477, ...
+ 'YData',-0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-11.52073732718894, ...
+ 'YData',-0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',mat12, ...
+ 'YData',-0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-19.35483870967742, ...
+ 'YData',0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-23.50230414746544, ...
+ 'YData',0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-24.88479262672811, ...
+ 'YData',0.8771929824561369);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-28.11059907834102, ...
+ 'YData',0.8771929824561369);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-29.49308755760369, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-31.10599078341014, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-32.02764976958525, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-33.17972350230414, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-33.6405529953917, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 1], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-34.7926267281106, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-35.02304147465438, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-35.48387096774194, ...
+ 'YData',1.461988304093566);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-35.71428571428572, ...
+ 'YData',1.461988304093566);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[-35.71428571428572 1.461988304093566 0], ...
+ 'String','20');
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.17511520737327, ...
+ 'YData',mat13);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',0.8771929824561369);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.63594470046083, ...
+ 'YData',0.2923976608187076);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.63594470046083, ...
+ 'YData',0);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.63594470046083, ...
+ 'YData',mat14);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',-2.339181286549703);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',-2.631578947368425);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',-4.67836257309942);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',-5.555555555555557);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',-8.187134502923982);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.40552995391705, ...
+ 'YData',-8.771929824561397);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.86635944700461, ...
+ 'YData',-13.15789473684211);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',mat15);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-16.08187134502924);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-17.54385964912281);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-18.12865497076023);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-19.88304093567251);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-20.17543859649123);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-21.92982456140351);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-22.22222222222222);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[-37.09677419354839 -22.22222222222222 0], ...
+ 'String','40');
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-23.09941520467836);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.09677419354839, ...
+ 'YData',-23.39181286549707);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-25.14619883040935);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-25.43859649122807);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-28.3625730994152);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.55760368663595, ...
+ 'YData',-28.94736842105263);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-31.87134502923976);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-32.16374269005848);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-34.7953216374269);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-35.38011695906432);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-38.88888888888889);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-39.76608187134503);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-43.27485380116958);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-43.5672514619883);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.78801843317973, ...
+ 'YData',-44.44444444444444);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.55760368663595, ...
+ 'YData',-45.32163742690058);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.55760368663595, ...
+ 'YData',-45.61403508771929);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-47.36842105263158);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-37.32718894009217, ...
+ 'YData',-47.95321637426901);
+h2 = line('Parent',h1, ...
+ 'Color',[0 1 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.86635944700461, ...
+ 'YData',-49.70760233918129);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[-36.86635944700461 -49.70760233918129 0], ...
+ 'String','60');
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-36.86635944700461, ...
+ 'YData',-50);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-35.71428571428572, ...
+ 'YData',-50.29239766081872);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-35.25345622119816, ...
+ 'YData',-50.29239766081872);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-32.02764976958527, ...
+ 'YData',-50.29239766081872);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-31.33640552995393, ...
+ 'YData',-50.29239766081872);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-27.88018433179725, ...
+ 'YData',-50.58479532163743);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-27.41935483870969, ...
+ 'YData',-50.58479532163743);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-18.20276497695854, ...
+ 'YData',-50.58479532163743);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-16.82027649769586, ...
+ 'YData',-51.16959064327486);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-12.21198156682029, ...
+ 'YData',-50.58479532163743);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-11.52073732718895, ...
+ 'YData',-50.58479532163743);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-6.912442396313377, ...
+ 'YData',-51.16959064327486);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-5.069124423963142, ...
+ 'YData',-51.75438596491229);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',mat16, ...
+ 'YData',-52.046783625731);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',-0.9216589861751281, ...
+ 'YData',-52.33918128654972);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',0.2304147465437687, ...
+ 'YData',-52.33918128654972);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',0.4608294930875481, ...
+ 'YData',-52.33918128654972);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',2.304147465437776, ...
+ 'YData',-52.63157894736843);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',2.534562211981548, ...
+ 'YData',-52.63157894736843);
+h2 = line('Parent',h1, ...
+ 'Color',[1 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',3.917050691244224, ...
+ 'YData',-52.63157894736843);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[3.917050691244224 -52.63157894736843 0], ...
+ 'String','80');
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',4.147465437788011, ...
+ 'YData',-52.63157894736843);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',4.147465437788011, ...
+ 'YData',-52.33918128654972);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.529953917050673, ...
+ 'YData',-46.19883040935674);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.990783410138231, ...
+ 'YData',-44.44444444444446);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',7.834101382488466, ...
+ 'YData',-28.0701754385965);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',8.294930875576025, ...
+ 'YData',-22.80701754385966);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',8.755760368663584, ...
+ 'YData',mat17);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',8.525345622119797, ...
+ 'YData',-14.9122807017544);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',7.373271889400908, ...
+ 'YData',-10.23391812865498);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',7.142857142857135, ...
+ 'YData',-9.94152046783627);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',6.221198156682018, ...
+ 'YData',-7.602339181286567);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',6.221198156682018, ...
+ 'YData',-7.309941520467845);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.760368663594459, ...
+ 'YData',-5.555555555555571);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.529953917050673, ...
+ 'YData',-5.555555555555571);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.2995391705069, ...
+ 'YData',-4.093567251462005);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.069124423963128, ...
+ 'YData',-2.631578947368439);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.069124423963128, ...
+ 'YData',-2.339181286549717);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.069124423963128, ...
+ 'YData',mat18);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',5.069124423963128, ...
+ 'YData',-1.169590643274873);
+h2 = line('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'LineStyle','none', ...
+ 'Marker','.', ...
+ 'XData',4.838709677419342, ...
+ 'YData',-0.2923976608187218);
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'Position',[4.838709677419342 -0.2923976608187218 0], ...
+ 'String','100');
+h2 = text('Parent',h1, ...
+ 'Color',[0 0 0], ...
+ 'HandleVisibility','off', ...
+ 'HorizontalAlignment','center', ...
+ 'Position',[-10.38961038961038 12.0675105485232 17.32050807568877], ...
+ 'VerticalAlignment','bottom');
+set(get(h2,'Parent'),'Title',h2);
+if nargout > 0, fig = h0; end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/test_square_fig.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/Square/test_square_fig.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/abcd_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/abcd_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,97 @@
+% Make the HHMM in Figure 1 of the NIPS'01 paper
+
+Qsize = [2 3 2];
+Qnodes = 1:3;
+D = 3;
+transprob = cell(1,D);
+termprob = cell(1,D);
+startprob = cell(1,D);
+clear A;
+
+% transprob{d}(i,k,j), transprob{1}(i,j)
+% termprob{d}(k,j), termprob{1}(1,j)
+% startprob{d}(k,j), startprob{1}(1,j)
+
+
+% LEVEL 1
+
+% 1 2 e
+A{1} = [0 0 1;
+ 0 0 1];
+[transprob{1}, termprob{1}] = remove_hhmm_end_state(A{1});
+startprob{1} = [0.5 0.5];
+
+% LEVEL 2
+A{2} = zeros(Qsize(2), Qsize(1), Qsize(2)+1);
+
+% 1 2 3 e
+A{2}(:,1,:) = [0 1 0 0 % Q1=1 => model below state 0
+ 0 0 1 0
+ 0 0 0 1];
+
+% 1 2 3 e
+A{2}(:,2,:) = [0 1 0 0 % Q1=2 => model below state 1
+ 0 0 1 0
+ 0 0 0 1];
+
+[transprob{2}, termprob{2}] = remove_hhmm_end_state(A{2});
+
+% always enter level 2 in state 1
+startprob{2} = [1 0 0
+ 1 0 0];
+
+% LEVEL 3
+
+A{3} = zeros([Qsize(3) Qsize(2) Qsize(3)+1]);
+endstate = Qsize(3)+1;
+% Qt-1(3) Qt(2) Qt(3)
+% 1 2 e
+A{3}(1, 1, endstate) = 1.0; % Q2=1 => model below state 2/5
+A{3}(:, 2, :) = [0.0 1.0 0.0 % Q2=2 => model below state 3/6
+ 0.5 0.0 0.5];
+A{3}(1, 3, endstate) = 1.0; % Q2=3 => model below state 4/7
+
+[transprob{3}, termprob{3}] = remove_hhmm_end_state(A{3});
+
+startprob{3} = 'leftstart';
+
+
+
+% OBS LEVEl
+
+chars = ['a', 'b', 'c', 'd', 'x', 'y'];
+Osize = length(chars);
+
+obsprob = zeros([Qsize Osize]);
+% 1 2 3 O
+obsprob(1,1,1,find(chars == 'a')) = 1.0;
+
+obsprob(1,2,1,find(chars == 'x')) = 1.0;
+obsprob(1,2,2,find(chars == 'y')) = 1.0;
+
+obsprob(1,3,1,find(chars == 'b')) = 1.0;
+
+obsprob(2,1,1,find(chars == 'c')) = 1.0;
+
+obsprob(2,2,1,find(chars == 'x')) = 1.0;
+obsprob(2,2,2,find(chars == 'y')) = 1.0;
+
+obsprob(2,3,1,find(chars == 'd')) = 1.0;
+
+Oargs = {'CPT', obsprob};
+
+bnet = mk_hhmm('Qsizes', Qsize, 'Osize', Osize, 'discrete_obs', 1, ...
+ 'Oargs', Oargs, 'Ops', Qnodes(1:3), ...
+ 'startprob', startprob, 'transprob', transprob, 'termprob', termprob);
+
+
+Q1 = 1; Q2 = 2; Q3 = 3; F3 = 4; F2 = 5; Onode = 6;
+Qnodes = [Q1 Q2 Q3]; Fnodes = [F2 F3];
+
+for seqi=1:3
+ evidence = sample_dbn(bnet, 'stop_test', 'is_F2_true_D3');
+ ev = cell2num(evidence);
+ chars(ev(end,:))
+ %T = size(evidence, 2)
+ %pretty_print_hhmm_parse(evidence, Qnodes, Fnodes, Onode, chars);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/add_hhmm_end_state.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/add_hhmm_end_state.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function A = add_hhmm_end_state(transprob, termprob)
+% ADD_HMM_END_STATE Combine trans and term probs into transmat for automaton with an end state
+% function A = add_hhmm_end_state(transprob, termprob)
+%
+% A(i,k,j) = Pr( i->j | Qps=k), where i in 1:Q, j in 1:(Q+1), and Q+1 is the end state
+% This implements the equation in sec 4.6 of my tech report, where
+% transprob(i,k,j) = \tilde{A}_k(i,j), termprob(k,j) = \tau_k(j)
+%
+% For the top level, the k index is missing.
+
+Q = size(transprob,1);
+toplevel = (ndims(transprob)==2);
+if toplevel
+ Qk = 1;
+ transprob = reshape(transprob, [Q 1 Q]);
+ termprob = reshape(termprob, [1 Q]);
+else
+ Qk = size(transprob, 2);
+end
+
+A = zeros(Q, Qk, Q+1);
+A(:,:,Q+1) = termprob';
+
+for k=1:Qk
+ for i=1:Q
+ for j=1:Q
+ A(i,k,j) = transprob(i,k,j) * (1-termprob(k,i));
+ end
+ end
+end
+
+if toplevel
+ A = squeeze(A);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/hhmm_jtree_clqs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/hhmm_jtree_clqs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,143 @@
+% Find out how big the cliques are in an HHMM as a function of depth
+% (This is how we get the complexity bound of O(D K^{1.5D}).)
+
+if 0
+Qsize = [];
+Fsize = [];
+Nclqs = [];
+end
+
+ds = 1:15;
+
+for d = ds
+ allQ = 1;
+ [intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo(d, allQ);
+
+ N = length(intra);
+ ns = 2*ones(1,N);
+
+ bnet = mk_dbn(intra, inter, ns);
+ for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+
+ if 0
+ T = 5;
+ dag = unroll_dbn_topology(intra, inter, T);
+ engine = jtree_unrolled_dbn_inf_engine(bnet, T, 'constrained', 1);
+ S = struct(engine);
+ S1 = struct(S.sub_engine);
+ end
+
+ engine = jtree_dbn_inf_engine(bnet);
+ S = struct(engine);
+ J = S.jtree_struct;
+
+ ss = 2*d+1;
+ Qnodes2 = Qnodes + ss;
+ QQnodes = [Qnodes Qnodes2];
+
+ % find out how many Q nodes in each clique, and how many F nodes
+ C = length(J.cliques);
+ Nclqs(d) = 0;
+ for c=1:C
+ Qsize(c,d) = length(myintersect(J.cliques{c}, QQnodes));
+ Fsize(c,d) = length(myintersect(J.cliques{c}, Fnodes));
+ if length(J.cliques{c}) > 1 % exclude observed leaves
+ Nclqs(d) = Nclqs(d) + 1;
+ end
+ end
+ %pred_max_Qsize(d) = ceil(d+(d+1)/2);
+ pred_max_Qsize(d) = ceil(1.5*d);
+
+ fprintf('d=%d\n', d);
+ %fprintf('D=%d, max F = %d. max Q = %d, pred max Q = %d\n', ...
+ % D, max(Fsize), max(Qsize), ceil(D+(D+1)/2));
+
+ %histc(Qsize,1:max(Qsize)) % how many of each size?
+end % next d
+
+
+Q = 2;
+pred_mass = ds.*(Q.^ds) + Q.^(ceil(1.5 * ds))
+pred_mass2 = Q.^(ceil(1.5 * ds))
+
+for d=ds
+ mass(d) = 0;
+ for c=1:C
+ mass(d) = mass(d) + Q^Qsize(c,d);
+ end
+end
+
+
+if 0
+%plot(ds, max(Qsize), 'o-', ds, pred_max_Qsize, '*--');
+%plot(ds, max(Qsize), 'o-', ds, 1.5*ds, '*--');
+%plot(ds, mass, 'o-', ds, pred_mass, '*--');
+D = 15;
+%plot(ds(1:D), mass(1:D), 'bo-', ds(1:D), pred_mass(1:D), 'g*--', ds(1:D), pred_mass2(1:D), 'k+-.');
+plot(ds(1:D), log(mass(1:D)), 'bo-', ds(1:D), log(pred_mass(1:D)), 'g*--', ds(1:D), log(pred_mass2(1:D)), 'k+-.');
+
+grid on
+xlabel('depth of hierarchy')
+title('max num Q nodes in any clique vs. depth')
+legend('actual', 'predicted')
+
+%previewfig(gcf, 'width', 3, 'height', 1.5, 'color', 'bw');
+%exportfig(gcf, '/home/cs/murphyk/WP/ConferencePapers/HHMM/clqsize2.eps', ...
+% 'width', 3, 'height', 1.5, 'color', 'bw');
+
+end
+
+
+if 0
+for d=ds
+ effnumclqs(d) = length(find(Qsize(:,d)>0));
+end
+ds = 1:10;
+Qs = 2:10;
+maxC = size(Qsize, 1);
+cost = [];
+cost_bound = [];
+for qi=1:length(Qs)
+ Q = Qs(qi);
+ for d=ds
+ cost(d,qi) = 0;
+ for c=1:maxC
+ if length(Qsize(c,d) > 0) % this clique contains Q nodes
+ cost(d,qi) = cost(d,qi) + Q^Qsize(c,d)*2^Fsize(c,d);
+ end
+ end
+ %cost_bound(d,qi) = effnumclqs(d) * 8 * Q^(max(Qsize(:,d)));
+ cost_bound(d,qi) = (effnumclqs(d)*8) + Q^(max(Qsize(:,d)));
+ end
+end
+
+qi=2; plot(ds, cost(:,qi), 'o-', ds, cost_bound(:,qi), '*--');
+end
+
+
+if 0
+% convert numbers in cliques into names
+for d=1:D
+ Fdecode(Fnodes(d)) = d;
+end
+for c=8:15
+ clqs = J.cliques{c};
+ fprintf('clique %d: ', c);
+ for k=clqs
+ if myismember(k, Qnodes)
+ fprintf('Q%d ', k)
+ elseif myismember(k, Fnodes)
+ fprintf('F%d ', Fdecode(k))
+ elseif isequal(k, Onode)
+ fprintf('O ')
+ elseif myismember(k, Qnodes2)
+ fprintf('Q%d* ', k-ss)
+ else
+ error(['unrecognized node ' k])
+ end
+ end
+ fprintf('\n');
+end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/mk_hhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/mk_hhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,258 @@
+function [bnet, Qnodes, Fnodes, Onode] = mk_hhmm(varargin)
+% MK_HHMM Make a Hierarchical HMM
+% function [bnet, Qnodes, Fnodes, Onode] = mk_hhmm(...)
+%
+% e.g. 3-layer hierarchical HMM where level 1 only connects to level 2
+% and the parents of the observed node are levels 2 and 3.
+% (This DBN is the same as Fig 10 in my tech report.)
+%
+% Q1 ----------> Q1
+% | \ ^ |
+% | v / |
+% | F2 ------/ |
+% | ^ ^ \ |
+% | / | \ |
+% | / | ||
+% v | vv
+% Q2----| --------> Q2
+% /| \ | ^|
+% / | v | / |
+% | | F3 --------/ |
+% | | ^ \ |
+% | v / v v
+% | Q3 -----------> Q3
+% | |
+% \ |
+% v v
+% O
+%
+%
+% Optional arguments in name/value format [default value in brackets]
+%
+% Qsizes - sizes at each level [ none ]
+% allQ - 1 means level i connects to all Q levels below, 0 means just to i+1 [0]
+% transprob - transprob{d}(i,k,j) = P(Q(d,t)=j|Q(d,t-1)=i,Q(1:d-1,t)=k) ['leftright']
+% startprob - startprob{d}(k,j) = P(Q(d,t)=j|Q(1:d-1,t)=k) ['leftstart']
+% termprob - termprob{d}(k,j) = P(F(d,t)=2|Q(1:d-1,t)=k,Q(d,t)=j) for d>1 ['rightstop']
+% selfprop - prob of a self transition (termprob default = 1-selfprop) [0.8]
+% Osize - size of O node
+% discrete_obs - 1 means O is tabular_CPD, 0 means gaussian_CPD [0]
+% Oargs - cell array of args to pass to the O CPD [ {} ]
+% Ops - Q parents of O [Qnodes(end)]
+% F1 - 1 means level 1 can finish (restart), else there is no F1->Q1 arc [0]
+% clamp1 - 1 means we clamp the params of the Q nodes in slice 1 (Qt1params) [1]
+% Note: the Qt1params are startprob, which should be shared with other slices.
+% However, in the current implementation, the Qt1params will only be estimated
+% from the initial state of each sequence.
+%
+% For d=1, startprob{1}(1,j) is only used in the first slice and
+% termprob{1} is ignored, since we assume the top level never resets.
+% Also, transprob{1}(i,j) can be used instead of transprob{1}(i,1,j).
+%
+% leftstart means the model always starts in state 1.
+% rightstop means the model can only finish in its last state (Qsize(d)).
+% unif means each state is equally like to reach any other
+% rnd means the transition/starting probs are random (drawn from rand)
+%
+% Q1:QD in slice 1 are of type tabular_CPD
+% Q1:QD in slice 2 are of type hhmmQ_CPD.
+% F(2:D-1) is of type hhmmF_CPD, FD is of type tabular_CPD.
+
+args = varargin;
+nargs = length(args);
+
+% get sizes of nodes and topology
+Qsizes = [];
+Osize = [];
+allQ = 0;
+Ops = [];
+F1 = 0;
+for i=1:2:nargs
+ switch args{i},
+ case 'Qsizes', Qsizes = args{i+1};
+ case 'Osize', Osize = args{i+1};
+ case 'allQ', allQ = args{i+1};
+ case 'Ops', Ops = args{i+1};
+ case 'F1', F1 = args{i+1};
+ end
+end
+if isempty(Qsizes), error('must specify Qsizes'); end
+if Osize==0, error('must specify Osize'); end
+D = length(Qsizes);
+Qnodes = 1:D;
+
+if isempty(Ops), Ops = Qnodes(end); end
+
+
+[intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo(D, allQ, Ops, F1);
+ss = length(intra);
+names = {};
+
+if F1
+ Fnodes_ndx = Fnodes;
+else
+ Fnodes_ndx = [-1 Fnodes]; % Fnodes(1) is a dummy index
+end
+
+% set default params
+discrete_obs = 0;
+Oargs = {};
+startprob = cell(1,D);
+startprob{1} = 'unif';
+for d=2:D
+ startprob{d} = 'leftstart';
+end
+transprob = cell(1,D);
+transprob{1} = 'unif';
+for d=2:D
+ transprob{d} = 'leftright';
+end
+termprob = cell(1,D);
+for d=2:D
+ termprob{d} = 'rightstop';
+end
+selfprob = 0.8;
+clamp1 = 1;
+
+for i=1:2:nargs
+ switch args{i},
+ case 'discrete_obs', discrete_obs = args{i+1};
+ case 'Oargs', Oargs = args{i+1};
+ case 'startprob', startprob = args{i+1};
+ case 'transprob', transprob = args{i+1};
+ case 'termprob', termprob = args{i+1};
+ case 'selfprob', selfprob = args{i+1};
+ case 'clamp1', clamp1 = args{i+1};
+ end
+end
+
+ns = zeros(1,ss);
+ns(Qnodes) = Qsizes;
+ns(Onode) = Osize;
+ns(Fnodes) = 2;
+
+dnodes = [Qnodes Fnodes];
+if discrete_obs
+ dnodes = [dnodes Onode];
+end
+onodes = [Onode];
+
+bnet = mk_dbn(intra, inter, ns, 'observed', onodes, 'discrete', dnodes, 'names', names);
+eclass = bnet.equiv_class;
+
+for d=1:D
+ if d==1
+ Qps = [];
+ elseif allQ
+ Qps = Qnodes(1:d-1);
+ else
+ Qps = Qnodes(d-1);
+ end
+ Qpsz = prod(ns(Qps));
+ Qsz = ns(Qnodes(d));
+ if isstr(startprob{d})
+ switch startprob{d}
+ case 'unif', startprob{d} = mk_stochastic(ones(Qpsz, Qsz));
+ case 'rnd', startprob{d} = mk_stochastic(rand(Qpsz, Qsz));
+ case 'leftstart', startprob{d} = zeros(Qpsz, Qsz); startprob{d}(:,1) = 1;
+ end
+ end
+ if isstr(transprob{d})
+ switch transprob{d}
+ case 'unif', transprob{d} = mk_stochastic(ones(Qsz, Qpsz, Qsz));
+ case 'rnd', transprob{d} = mk_stochastic(rand(Qsz, Qpsz, Qsz));
+ case 'leftright',
+ LR = mk_leftright_transmat(Qsz, selfprob);
+ temp = repmat(reshape(LR, [1 Qsz Qsz]), [Qpsz 1 1]); % transprob(k,i,j)
+ transprob{d} = permute(temp, [2 1 3]); % now transprob(i,k,j)
+ end
+ end
+ if isstr(termprob{d})
+ switch termprob{d}
+ case 'unif', termprob{d} = mk_stochastic(ones(Qpsz, Qsz, 2));
+ case 'rnd', termprob{d} = mk_stochastic(rand(Qpsz, Qsz, 2));
+ case 'rightstop',
+ %termprob(k,i,t) Might terminate if i=Qsz; will not terminate if i1 % passed in termprob{d}(k,j)
+ temp = termprob{d};
+ termprob{d} = zeros(Qpsz, Qsz, 2);
+ termprob{d}(:,:,2) = temp;
+ termprob{d}(:,:,1) = ones(Qpsz,Qsz) - temp;
+ end
+end
+
+
+% SLICE 1
+
+for d=1:D
+ bnet.CPD{eclass(Qnodes(d),1)} = tabular_CPD(bnet, Qnodes(d), 'CPT', startprob{d}, 'adjustable', clamp1);
+end
+
+if F1
+ d = 1;
+ bnet.CPD{eclass(Fnodes_ndx(d),1)} = hhmmF_CPD(bnet, Fnodes_ndx(d), Qnodes(d), Fnodes_ndx(d+1), ...
+ 'termprob', termprob{d});
+end
+for d=2:D-1
+ if allQ
+ Qps = Qnodes(1:d-1);
+ else
+ Qps = Qnodes(d-1);
+ end
+ bnet.CPD{eclass(Fnodes_ndx(d),1)} = hhmmF_CPD(bnet, Fnodes_ndx(d), Qnodes(d), Fnodes_ndx(d+1), ...
+ 'Qps', Qps, 'termprob', termprob{d});
+end
+bnet.CPD{eclass(Fnodes_ndx(D),1)} = tabular_CPD(bnet, Fnodes_ndx(D), 'CPT', termprob{D});
+
+if discrete_obs
+ bnet.CPD{eclass(Onode,1)} = tabular_CPD(bnet, Onode, Oargs{:});
+else
+ bnet.CPD{eclass(Onode,1)} = gaussian_CPD(bnet, Onode, Oargs{:});
+end
+
+% SLICE 2
+
+%for d=1:D
+% bnet.CPD{eclass(Qnodes(d),2)} = hhmmQ_CPD(bnet, Qnodes(d)+ss, Qnodes, d, D, ...
+% 'startprob', startprob{d}, 'transprob', transprob{d}, ...
+% 'allQ', allQ);
+%end
+
+d = 1;
+if F1
+ bnet.CPD{eclass(Qnodes(d),2)} = hhmmQ_CPD(bnet, Qnodes(d)+ss, 'Fself', Fnodes_ndx(d), ...
+ 'Fbelow', Fnodes_ndx(d+1), ...
+ 'startprob', startprob{d}, 'transprob', transprob{d});
+else
+ bnet.CPD{eclass(Qnodes(d),2)} = hhmmQ_CPD(bnet, Qnodes(d)+ss, ...
+ 'Fbelow', Fnodes_ndx(d+1), ...
+ 'startprob', startprob{d}, 'transprob', transprob{d});
+end
+for d=2:D-1
+ if allQ
+ Qps = Qnodes(1:d-1);
+ else
+ Qps = Qnodes(d-1);
+ end
+ Qps = Qps + ss; % since all in slice 2
+ bnet.CPD{eclass(Qnodes(d),2)} = hhmmQ_CPD(bnet, Qnodes(d)+ss, 'Fself', Fnodes_ndx(d), ...
+ 'Fbelow', Fnodes_ndx(d+1), 'Qps', Qps, ...
+ 'startprob', startprob{d}, 'transprob', transprob{d});
+end
+d = D;
+if allQ
+ Qps = Qnodes(1:d-1);
+else
+ Qps = Qnodes(d-1);
+end
+Qps = Qps + ss; % since all in slice 2
+bnet.CPD{eclass(Qnodes(d),2)} = hhmmQ_CPD(bnet, Qnodes(d)+ss, 'Fself', Fnodes_ndx(d), ...
+ 'Qps', Qps, ...
+ 'startprob', startprob{d}, 'transprob', transprob{d});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/mk_hhmm_topo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/mk_hhmm_topo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+function [intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo(D, all_Q_to_Qs, Ops, F1)
+% MK_HHMM_TOPO Make Hierarchical HMM topology
+% function [intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo(D, all_Q_to_Qs, Ops, F1)
+%
+% D is the depth of the hierarchy
+% If all_Q_to_Qs = 1, level i connects to all levels below, else just to i+1 [0]
+% Ops are the Q parents of the observed node [Qnodes(end)]
+% If F1=1, level 1 can finish (restart), else there is no F1->Q1 arc [0]
+
+Qnodes = 1:D;
+
+if nargin < 2, all_Q_to_Qs = 1; end
+if nargin < 3, Ops = Qnodes(D); end
+if nargin < 4, F1 = 0; end
+
+if F1
+ Fnodes = 2*D:-1:D+1; % must number from bottom to top
+ Onode = 2*D+1;
+ ss = 2*D+1;
+else
+ Fnodes = [-1 (2*D)-1:-1:D+1]; % Fnodes(1) is a dummy index
+ Onode = 2*D;
+ ss = 2*D;
+end
+
+intra = zeros(ss);
+intra(Ops, Onode) = 1;
+for d=1:D-1
+ if all_Q_to_Qs
+ intra(Qnodes(d), Qnodes(d+1:end)) = 1;
+ else
+ intra(Qnodes(d), Qnodes(d+1)) = 1;
+ end
+end
+for d=D:-1:3
+ intra(Fnodes(d), Fnodes(d-1)) = 1;
+end
+if F1
+ intra(Fnodes(2), Fnodes(1)) = 1;
+end
+if all_Q_to_Qs
+ if F1
+ intra(Qnodes(1), Fnodes(1:end)) = 1;
+ else
+ intra(Qnodes(1), Fnodes(2:end)) = 1;
+ end
+ for d=2:D
+ intra(Qnodes(d), Fnodes(d:end)) = 1;
+ end
+else
+ if F1
+ intra(Qnodes(1), Fnodes([1 2])) = 1;
+ else
+ intra(Qnodes(1), Fnodes(2)) = 1;
+ end
+ for d=2:D-1
+ intra(Qnodes(d), Fnodes([d d+1])) = 1;
+ end
+ intra(Qnodes(D), Fnodes(D)) = 1;
+end
+
+
+inter = zeros(ss);
+for d=1:D
+ inter(Qnodes(d), Qnodes(d)) = 1;
+end
+if F1
+ inter(Fnodes(1), Qnodes(1)) = 1;
+end
+for d=2:D
+ inter(Fnodes(d), Qnodes([d-1 d])) = 1;
+end
+
+if ~F1
+ Fnodes = Fnodes(2:end); % strip off dummy -1 term
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/mk_hhmm_topo_F1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/mk_hhmm_topo_F1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function [intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo_F1(D, all_Q_to_Qs, Ops)
+% MK_HHMM_TOPO Make Hierarchical HMM topology assuming level 1 can finish
+% function [intra, inter, Qnodes, Fnodes, Onode] = mk_hhmm_topo(D, all_Q_to_Qs, Ops, F1)
+%
+% D is the depth of the hierarchy
+% If all_Q_to_Qs = 1, level i connects to all levels below, else just to i+1 [0]
+% Ops are the Q parents of the observed node [Qnodes(end)]
+% If F1=1, level 1 can finish (restart), else there is no F1->Q1 arc [0]
+
+Qnodes = 1:D;
+
+if nargin < 2, all_Q_to_Qs = 1; end
+if nargin < 3, Ops = Qnodes(D); end
+if nargin < 4, F1 = 0; end
+
+if F1
+ Fnodes = 2*D:-1:D+1; % must number from bottom to top
+ Onode = 2*D+1;
+ ss = 2*D+1;
+else
+ Fnodes = (2*D)-1:-1:D+1;
+ Onode = 2*D;
+ ss = 2*D;
+end
+
+intra = zeros(ss);
+intra(Ops, Onode) = 1;
+for d=1:D-1
+ if all_Q_to_Qs
+ intra(Qnodes(d), Qnodes(d+1:end)) = 1;
+ else
+ intra(Qnodes(d), Qnodes(d+1)) = 1;
+ end
+end
+for d=D:-1:3
+ intra(Fnodes(d), Fnodes(d-1)) = 1;
+end
+if F1
+ intra(Fnodes(2), Fnodes(1)) = 1;
+end
+if all_Q_to_Qs
+ for d=1:D
+ intra(Qnodes(d), Fnodes(d:end)) = 1;
+ end
+else
+ for d=1:D
+ if d < D
+ intra(Qnodes(d), Fnodes([d d+1])) = 1;
+ else
+ intra(Qnodes(d), Fnodes(d)) = 1;
+ end
+ end
+end
+
+inter = zeros(ss);
+for d=1:D
+ inter(Qnodes(d), Qnodes(d)) = 1;
+end
+for d=1:D
+ if d==1
+ inter(Fnodes(d), Qnodes(d)) = 1;
+ else
+ inter(Fnodes(d), Qnodes([d-1 d])) = 1;
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/pretty_print_hhmm_parse.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/pretty_print_hhmm_parse.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, alphabet)
+% function pretty_print_hhmm_parse(mpe, Qnodes, Fnodes, Onode, alphabet)
+%
+% mpe(i,t) is the most probable value of node i at time t
+% Qnodes(1:D), Fnodes = [F2 .. FD], Onode contain the node ids
+% alphabet(i) is the i'th output symbol, or [] if don't want displayed
+
+T = size(mpe,2);
+ncols = 20;
+t1 = 1; t2 = min(T, t1+ncols-1);
+while (t1 < T)
+ %fprintf('%d:%d\n', t1, t2);
+ if iscell(mpe)
+ print_block_cell(mpe(:,t1:t2), Qnodes, Fnodes, Onode, alphabet, t1);
+ else
+ print_block(mpe(:,t1:t2), Qnodes, Fnodes, Onode, alphabet, t1);
+ end
+ fprintf('\n\n');
+ t1 = t2+1; t2 = min(T, t1+ncols-1);
+end
+
+%%%%%%
+
+function print_block_cell(mpe, Qnodes, Fnodes, Onode, alphabet, start)
+
+D = length(Qnodes);
+T = size(mpe, 2);
+fprintf('%3d ', start:start+T-1); fprintf('\n');
+for d=1:D
+ for t=1:T
+ if (d > 1) & (mpe{Fnodes(d-1),t} == 2)
+ fprintf('%3d|', mpe{Qnodes(d), t});
+ else
+ fprintf('%3d ', mpe{Qnodes(d), t});
+ end
+ end
+ fprintf('\n');
+end
+if ~isempty(alphabet)
+ a = cell2num(mpe(Onode,:));
+ %fprintf('%3c ', alphabet(mpe{Onode,:}));
+ fprintf('%3c ', alphabet(a))
+ fprintf('\n');
+end
+
+
+%%%%%%
+
+function print_block(mpe, Qnodes, Fnodes, Onode, alphabet, start)
+
+D = length(Qnodes);
+T = size(mpe, 2);
+fprintf('%3d ', start:start+T-1); fprintf('\n');
+for d=1:D
+ for t=1:T
+ if (d > 1) & (mpe(Fnodes(d-1),t) == 2)
+ fprintf('%3d|', mpe(Qnodes(d), t));
+ else
+ fprintf('%3d ', mpe(Qnodes(d), t));
+ end
+ end
+ fprintf('\n');
+end
+if ~isempty(alphabet)
+ fprintf('%3c ', alphabet(mpe(Onode,:)));
+ fprintf('\n');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/remove_hhmm_end_state.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/HHMM/remove_hhmm_end_state.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function [transprob, termprob] = remove_hhmm_end_state(A)
+% REMOVE_END_STATE Infer transition and termination probabilities from automaton with an end state
+% [transprob, termprob] = remove_end_state(A)
+%
+% A(i,k,j) = Pr( i->j | Qps=k), where i in 1:Q, j in 1:(Q+1), and Q+1 is the end state
+% This implements the equation in footnote 3 of my NIPS 01 paper,
+% transprob(i,k,j) = \tilde{A}_k(i,j)
+% termprob(k,j) = \tau_k(j)
+%
+% For the top level, the k index is missing.
+
+Q = size(A,1);
+toplevel = (ndims(A)==2);
+if toplevel
+ Qk = 1;
+ A = reshape(A, [Q 1 Q+1]);
+else
+ Qk = size(A, 2);
+end
+
+transprob = A(:, :, 1:Q);
+term = A(:,:,Q+1)'; % term(k,j) = P(Qj -> end | k)
+termprob = term;
+%termprob = zeros(Qk, Q, 2);
+%termprob(:,:,2) = term;
+%termprob(:,:,1) = 1-term;
+
+for k=1:Qk
+ for i=1:Q
+ for j=1:Q
+ denom = (1-termprob(k,i));
+ denom = denom + (denom==0)*eps;
+ transprob(i,k,j) = transprob(i,k,j) / denom;
+ end
+ end
+end
+
+if toplevel
+ termprob = squeeze(termprob);
+ transprob = squeeze(transprob);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/chmm1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cmp_inference.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/kalman1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/old.water1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/online1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/online2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/scg_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/chmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/chmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+% Compare the speeds of various inference engines on a coupled HMM
+
+N = 2;
+Q = 2;
+rand('state', 0);
+randn('state', 0);
+discrete = 1;
+if discrete
+ Y = 2; % size of output alphabet
+else
+ Y = 1;
+end
+coupled = 1;
+[bnet, onodes] = mk_chmm(N, Q, Y, discrete, coupled);
+ss = N*2;
+
+T = 3;
+
+
+engine = {};
+tic; engine{end+1} = jtree_dbn_inf_engine(bnet, 'observed', onodes); toc
+%tic; engine{end+1} = jtree_ndxSD_dbn_inf_engine(bnet, onodes); toc
+%tic; engine{end+1} = jtree_ndxB_dbn_inf_engine(bnet, onodes); toc
+engine{end+1} = hmm_inf_engine(bnet, onodes);
+%engine{end+1} = dhmm_inf_engine(bnet, onodes);
+tic; engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T, onodes); toc
+
+%engine{end+1} = bk_inf_engine(bnet, 'ff', onodes);
+%engine{end+1} = loopy_dbn_inf_engine(bnet, onodes);
+
+exact = [1 2 3];
+
+filter = 0;
+single = 0;
+maximize = 0;
+
+[err, time, engine] = cmp_inference(bnet, onodes, engine, exact, T, filter, single, maximize);
+%err = cmp_learning(bnet, onodes, engine, exact, T);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/cmp_inference.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/cmp_inference.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,75 @@
+function [err, time, engine] = cmp_inference(bnet, engine, exact, T, filter, singletons, maximize)
+% CMP_INFERENCE Compare several inference engines on a DBN
+% [err, time, engine] = cmp_inference(bnet, engine, exact, T, filter, singletons, maximize)
+%
+% engine{i} is the i'th inference engine.
+% 'exact' specifies which engines do exact inference -
+% we check that these all give the same results.
+% 'T' is the length of the random sequence we generate.
+% If filter=1, we do filtering, else smoothing (default: smoothing)
+% If singletons=1, we compare marginal_nodes, else marginal_family (default: family)
+%
+% err(e,n,t) = sum_i | Pr_exact(X(n,t)=i) - Pr_e(X(n,t)=i) |
+% where Pr_e = prob. according to engine e
+% time(e) = elapsed time for doing inference with engine e
+
+err = [];
+
+if nargin < 5, filter = 0; end
+if nargin < 6, singletons = 0; end
+if nargin < 7, maximize = 0; end
+
+check_ll = 1;
+
+assert(~maximize);
+
+E = length(engine);
+ref = exact(1); % reference
+
+ss = length(bnet.intra);
+ev = sample_dbn(bnet, 'length', T);
+evidence = cell(ss,T);
+onodes = bnet.observed;
+evidence(onodes,:) = ev(onodes, :);
+
+assert(~filter);
+for i=1:E
+ tic;
+ %[engine{i}, ll(i)] = enter_evidence(engine{i}, evidence, 'maximize', maximize);
+ [engine{i}, ll(i)] = enter_evidence(engine{i}, evidence);
+ time(i)=toc;
+ fprintf('engine %d took %6.4f seconds\n', i, time(i));
+end
+
+cmp = mysetdiff(exact, ref);
+if check_ll
+for i=cmp(:)'
+ if ~approxeq(ll(ref), ll(i))
+ error(['engine ' num2str(i) ' has wrong ll'])
+ end
+end
+end
+ll
+
+hnodes = mysetdiff(1:ss, onodes);
+m = cell(1,E);
+for t=1:T
+ for n=hnodes(:)'
+ for e=1:E
+ if singletons
+ m{e} = marginal_nodes(engine{e}, n, t);
+ else
+ m{e} = marginal_family(engine{e}, n, t);
+ end
+ end
+ for e=1:E
+ assert(isequal(m{e}.domain, m{ref}.domain));
+ end
+ for e=cmp(:)'
+ if ~approxeq(m{ref}.T(:), m{e}.T(:))
+ str= sprintf('engine %d is wrong; n=%d, t=%d', e, n, t);
+ error(str)
+ end
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/kalman1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/kalman1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,127 @@
+% Make a linear dynamical system
+% X1 -> X2
+% | |
+% v v
+% Y1 Y2
+
+intra = zeros(2);
+intra(1,2) = 1;
+inter = zeros(2);
+inter(1,1) = 1;
+n = 2;
+
+X = 2; % size of hidden state
+Y = 2; % size of observable state
+
+ns = [X Y];
+dnodes = [];
+onodes = [2];
+eclass1 = [1 2];
+eclass2 = [3 2];
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+
+x0 = rand(X,1);
+V0 = eye(X);
+C0 = rand(Y,X);
+R0 = eye(Y);
+A0 = rand(X,X);
+Q0 = eye(X);
+
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', x0, 'cov', V0, 'cov_prior_weight', 0);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(Y,1), 'cov', R0, 'weights', C0, ...
+ 'clamp_mean', 1, 'cov_prior_weight', 0);
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', zeros(X,1), 'cov', Q0, 'weights', A0, ...
+ 'clamp_mean', 1, 'cov_prior_weight', 0);
+
+
+T = 5; % fixed length sequences
+
+clear engine;
+engine{1} = kalman_inf_engine(bnet);
+engine{2} = jtree_unrolled_dbn_inf_engine(bnet, T);
+engine{3} = jtree_dbn_inf_engine(bnet);
+N = length(engine);
+
+% inference
+
+ev = sample_dbn(bnet, T);
+evidence = cell(n,T);
+evidence(onodes,:) = ev(onodes, :);
+
+t = 1;
+query = [1 3];
+m = cell(1, N);
+ll = zeros(1, N);
+for i=1:N
+ [engine{i}, ll(i)] = enter_evidence(engine{i}, evidence);
+ m{i} = marginal_nodes(engine{i}, query, t);
+end
+
+% compare all engines to engine{1}
+for i=2:N
+ assert(approxeq(m{1}.mu, m{i}.mu));
+ assert(approxeq(m{1}.Sigma, m{i}.Sigma));
+ assert(approxeq(ll(1), ll(i)));
+end
+
+if 0
+for i=2:N
+ approxeq(m{1}.mu, m{i}.mu)
+ approxeq(m{1}.Sigma, m{i}.Sigma)
+ approxeq(ll(1), ll(i))
+end
+end
+
+% learning
+
+ncases = 5;
+cases = cell(1, ncases);
+for i=1:ncases
+ ev = sample_dbn(bnet, T);
+ cases{i} = cell(n,T);
+ cases{i}(onodes,:) = ev(onodes, :);
+end
+
+max_iter = 2;
+bnet2 = cell(1,N);
+LLtrace = cell(1,N);
+for i=1:N
+ [bnet2{i}, LLtrace{i}] = learn_params_dbn_em(engine{i}, cases, 'max_iter', max_iter);
+end
+
+for i=1:N
+ temp = bnet2{i};
+ for e=1:3
+ CPD{i,e} = struct(temp.CPD{e});
+ end
+end
+
+for i=2:N
+ assert(approxeq(LLtrace{i}, LLtrace{1}));
+ for e=1:3
+ assert(approxeq(CPD{i,e}.mean, CPD{1,e}.mean));
+ assert(approxeq(CPD{i,e}.cov, CPD{1,e}.cov));
+ assert(approxeq(CPD{i,e}.weights, CPD{1,e}.weights));
+ end
+end
+
+
+% Compare to KF toolbox
+
+data = zeros(Y, T, ncases);
+for i=1:ncases
+ data(:,:,i) = cell2num(cases{i}(onodes, :));
+end
+[A2, C2, Q2, R2, x2, V2, LL2trace] = learn_kalman(data, A0, C0, Q0, R0, x0, V0, max_iter);
+
+
+e = 1;
+assert(approxeq(x2, CPD{e,1}.mean))
+assert(approxeq(V2, CPD{e,1}.cov))
+assert(approxeq(C2, CPD{e,2}.weights))
+assert(approxeq(R2, CPD{e,2}.cov));
+assert(approxeq(A2, CPD{e,3}.weights))
+assert(approxeq(Q2, CPD{e,3}.cov));
+assert(approxeq(LL2trace, LLtrace{1}))
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/old.water1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/old.water1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+% Compare the speeds of various inference engines on the water DBN
+
+[bnet, onodes] = mk_water_dbn;
+
+T = 3;
+
+engine = {};
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T, onodes);
+engine{end+1} = hmm_inf_engine(bnet, onodes);
+engine{end+1} = frontier_inf_engine(bnet, onodes);
+engine{end+1} = jtree_dbn_inf_engine(bnet, onodes);
+engine{end+1} = bk_inf_engine(bnet, 'exact', onodes);
+
+engine{end+1} = bk_inf_engine(bnet, 'ff', onodes);
+engine{end+1} = bk_inf_engine(bnet, { [1 2], [3 4 5 6], [7 8] }, onodes);
+
+N = length(engine);
+exact = 1:5;
+
+
+filter = 0;
+err = cmp_inference(bnet, onodes, engine, exact, T, filter);
+
+% elapsed times for enter_evidence (matlab 5.3 on PIII with 256MB running Redhat linux)
+
+% T = 5, 4/20/00
+% 0.6266 unrolled *
+% 0.3490 hmm *
+% 1.1743 frontier
+% 1.4621 old frontier
+% 0.3270 fast frontier *
+% 1.3926 jtree
+% 1.3790 bk
+% 0.4916 fast bk
+% 0.4190 fast bk compiled
+% 0.3574 fast jtree *
+
+
+err = cmp_learning(bnet, onodes, engine, exact, T);
+
+% elapsed times for learn_params_dbn_em (matlab 5.3 on PIII with 256MB running Redhat linux)
+
+% T = 5, 2cases, 2 iter, 4/20/00
+% 3.5750 unrolled
+% 3.7475 hmm
+% 2.1452 fast frontier
+% 2.5724 fast bk compiled
+% 2.3387 fast jtree
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/online1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/online1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+% Check that online inference gives same results as filtering for various algorithms
+
+N = 3;
+Q = 2;
+ss = N*2;
+
+rand('state', 0);
+randn('state', 0);
+
+
+obs_size = 1;
+discrete_obs = 0;
+bnet = mk_chmm(N, Q, obs_size, discrete_obs);
+ns = bnet.node_sizes_slice;
+
+engine = {};
+engine{end+1} = hmm_inf_engine(bnet);
+E = length(engine);
+
+onodes = (1:N)+N;
+
+T = 4;
+ev = cell(ss,T);
+ev(onodes,:) = num2cell(randn(N, T));
+
+
+filter = 1;
+loglik2 = zeros(1,E);
+for e=1:E
+ [engine2{e}, loglik2(e)] = enter_evidence(engine{e}, ev, 'filter', filter);
+end
+
+loglik = zeros(1,E);
+marg1 = cell(E,N,T);
+for e=1:E
+ ll = zeros(1,T);
+ engine{e} = dbn_init_bel(engine{e});
+ for t=1:T
+ [engine{e}, ll(t)] = dbn_update_bel(engine{e}, ev(:,t), t);
+ for i=1:N
+ marg1{e,i,t} = dbn_marginal_from_bel(engine{e}, i);
+ end
+ end
+ loglik1(e) = sum(ll);
+end
+
+assert(approxeq(loglik1, loglik2))
+
+a = zeros(E,N,T);
+for e=1:E
+ for t=1:T
+ for i=1:N
+ marg2{e,i,t} = marginal_nodes(engine2{e}, i, t);
+ a(e,i,t) = (approxeq(marg2{e,i,t}.T(:), marg1{e,i,t}.T(:)));
+ end
+ end
+end
+
+assert(all(a(:)==1))
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/online2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/online2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
+N = 1; % regular HMM
+Q = 2;
+ss = 2;
+hnodes = 1;
+onodes = 2;
+
+rand('state', 0);
+randn('state', 0);
+O = 2;
+discrete_obs = 1;
+bnet = mk_chmm(N, Q, O, discrete_obs);
+ns = bnet.node_sizes_slice;
+
+engine = hmm_inf_engine(bnet, onodes);
+
+T = 4;
+ev = cell(ss,T);
+ev(onodes,:) = num2cell(sample_discrete([0.5 0.5], N, T));
+
+
+engine = dbn_init_bel(engine);
+for t=1:T
+ if t==1
+ [engine, ll(t)] = dbn_update_bel1(engine, ev(:,t));
+ else
+ [engine, ll(t)] = dbn_update_bel(engine, ev(:,t-1:t));
+ end
+ % one-step ahead prediction
+ lag = 1;
+ engine2 = dbn_predict_bel(engine, lag);
+ marg = dbn_marginal_from_bel(engine2, 1)
+ marg = dbn_marginal_from_bel(engine2, 2)
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/scg_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/scg_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+% to test whether scg inference engine can handl dynameic BN
+% Make a linear dynamical system
+% X1 -> X2
+% | |
+% v v
+% Y1 Y2
+
+intra = zeros(2);
+intra(1,2) = 1;
+inter = zeros(2);
+inter(1,1) = 1;
+n = 2;
+
+X = 2; % size of hidden state
+Y = 2; % size of observable state
+
+ns = [X Y];
+dnodes = [];
+onodes = [2];
+eclass1 = [1 2];
+eclass2 = [3 2];
+bnet = mk_dbn(intra, inter, ns, dnodes, eclass1, eclass2);
+
+x0 = rand(X,1);
+V0 = eye(X);
+C0 = rand(Y,X);
+R0 = eye(Y);
+A0 = rand(X,X);
+Q0 = eye(X);
+
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', x0, 'cov', V0);
+%bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(Y,1), 'cov', R0, 'weights', C0, 'full', 'untied', 'clamped_mean');
+%bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', zeros(X,1), 'cov', Q0, 'weights', A0, 'full', 'untied', 'clamped_mean');
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(Y,1), 'cov', R0, 'weights', C0);
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', zeros(X,1), 'cov', Q0, 'weights', A0);
+
+
+T = 5; % fixed length sequences
+
+clear engine;
+%engine{1} = kalman_inf_engine(bnet, onodes);
+engine{1} = scg_unrolled_dbn_inf_engine(bnet, T, onodes);
+engine{2} = jtree_unrolled_dbn_inf_engine(bnet, T);
+
+N = length(engine);
+
+% inference
+
+ev = sample_dbn(bnet, T);
+evidence = cell(n,T);
+evidence(onodes,:) = ev(onodes, :);
+
+t = 2;
+query = [1 3];
+m = cell(1, N);
+ll = zeros(1, N);
+
+engine{1} = enter_evidence(engine{1}, evidence);
+[engine{2}, ll(2)] = enter_evidence(engine{2}, evidence);
+m{1} = marginal_nodes(engine{1}, query);
+m{2} = marginal_nodes(engine{2}, query, t);
+
+
+% compare all engines to engine{1}
+for i=2:N
+ assert(approxeq(m{1}.mu, m{i}.mu));
+ assert(approxeq(m{1}.Sigma, m{i}.Sigma));
+% assert(approxeq(ll(1), ll(i)));
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/mk_gmux_robot_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_linear_slam.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/slam_kf.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/slam_offline_loopy.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/slam_partial_kf.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/slam_stationary_loopy.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/SLAM
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/offline_loopy_slam.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/paskin1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/skf_data_assoc_gmux2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/slam_kf.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/dynamic/SLAM/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/offline_loopy_slam.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/offline_loopy_slam.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,231 @@
+% We navigate a robot around a square using a fixed control policy and no noise.
+% We assume the robot observes the relative distance to the nearest landmark.
+% Everything is linear-Gaussian.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Create toy data set
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+if 1
+ T = 20;
+ ctrl_signal = [repmat([1 0]', 1, T/4) repmat([0 1]', 1, T/4) ...
+ repmat([-1 0]', 1, T/4) repmat([0 -1]', 1, T/4)];
+else
+ T = 5;
+ ctrl_signal = repmat([1 0]', 1, T);
+end
+
+nlandmarks = 4;
+true_landmark_pos = [1 1;
+ 4 1;
+ 4 4;
+ 1 4]';
+init_robot_pos = [0 0]';
+
+true_robot_pos = zeros(2, T);
+true_data_assoc = zeros(1, T);
+true_rel_dist = zeros(2, T);
+for t=1:T
+ if t>1
+ true_robot_pos(:,t) = true_robot_pos(:,t-1) + ctrl_signal(:,t);
+ else
+ true_robot_pos(:,t) = init_robot_pos + ctrl_signal(:,t);
+ end
+ nn = argmin(dist2(true_robot_pos(:,t)', true_landmark_pos'));
+ %nn = t; % observe 1, 2, 3
+ true_data_assoc(t) = nn;
+ true_rel_dist(:,t) = true_landmark_pos(:, nn) - true_robot_pos(:,t);
+end
+
+figure(1);
+%clf;
+hold on
+%plot(true_landmark_pos(1,:), true_landmark_pos(2,:), '*');
+for i=1:nlandmarks
+ text(true_landmark_pos(1,i), true_landmark_pos(2,i), sprintf('L%d',i));
+end
+for t=1:T
+ text(true_robot_pos(1,t), true_robot_pos(2,t), sprintf('%d',t));
+end
+hold off
+axis([-1 6 -1 6])
+
+R = 1e-3*eye(2); % noise added to observation
+Q = 1e-3*eye(2); % noise added to robot motion
+
+% Create data set
+obs_noise_seq = sample_gaussian([0 0]', R, T)';
+obs_rel_pos = true_rel_dist + obs_noise_seq;
+%obs_rel_pos = true_rel_dist;
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Create params for inference
+
+% X(t) = A X(t-1) + B U(t) + noise(Q)
+
+% [L1] = [1 ] * [L1] + [0] * Ut + [0 ]
+% [L2] [ 1 ] [L2] [0] [ 0 ]
+% [R ]t [ 1] [R ]t-1 [1] [ Q]
+
+% Y(t)|S(t)=s = C(s) X(t) + noise(R)
+% Yt|St=1 = [1 0 -1] * [L1] + R
+% [L2]
+% [R ]
+
+% Create indices into block structure
+bs = 2*ones(1, nlandmarks+1); % sizes of blocks in state space
+robot_block = block(nlandmarks+1, bs);
+for i=1:nlandmarks
+ landmark_block(:,i) = block(i, bs)';
+end
+Xsz = 2*(nlandmarks+1); % 2 values for each landmark plus robot
+Ysz = 2; % observe relative location
+Usz = 2; % input is (dx, dy)
+
+
+% create block-diagonal trans matrix for each switch
+A = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ A(bi, bi) = eye(2);
+end
+bi = robot_block;
+A(bi, bi) = eye(2);
+A = repmat(A, [1 1 nlandmarks]); % same for all switch values
+
+% create block-diagonal system cov
+
+
+Qbig = zeros(Xsz, Xsz);
+bi = robot_block;
+Qbig(bi,bi) = Q; % only add noise to robot motion
+Qbig = repmat(Qbig, [1 1 nlandmarks]);
+
+% create input matrix
+B = zeros(Xsz, Usz);
+B(robot_block,:) = eye(2); % only add input to robot position
+B = repmat(B, [1 1 nlandmarks]);
+
+% create observation matrix for each value of the switch node
+% C(:,:,i) = (0 ... I ... -I) where the I is in the i'th posn.
+% This computes L(i) - R
+C = zeros(Ysz, Xsz, nlandmarks);
+for i=1:nlandmarks
+ C(:, landmark_block(:,i), i) = eye(2);
+ C(:, robot_block, i) = -eye(2);
+end
+
+% create observation cov for each value of the switch node
+Rbig = repmat(R, [1 1 nlandmarks]);
+
+% initial conditions
+init_x = zeros(Xsz, 1);
+init_v = zeros(Xsz, Xsz);
+bi = robot_block;
+init_x(bi) = init_robot_pos;
+init_V(bi, bi) = 1e-5*eye(2); % very sure of robot posn
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ init_V(bi,bi)= 1e5*eye(2); % very uncertain of landmark psosns
+ %init_x(bi) = true_landmark_pos(:,i);
+ %init_V(bi,bi)= 1e-5*eye(2); % very sure of landmark psosns
+end
+
+%%%%%%%%%%%%%%%%%%%%%
+% Inference
+if 1
+[xsmooth, Vsmooth] = kalman_smoother(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B);
+
+est_robot_pos = xsmooth(robot_block, :);
+est_robot_pos_cov = Vsmooth(robot_block, robot_block, :);
+
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ est_landmark_pos(:,i) = xsmooth(bi, T);
+ est_landmark_pos_cov(:,:,i) = Vsmooth(bi, bi, T);
+end
+end
+
+
+if 0
+figure(1); hold on
+for i=1:nlandmarks
+ h=plotgauss2d(est_landmark_pos(:,i), est_landmark_pos_cov(:,:,i));
+ set(h, 'color', 'r')
+end
+hold off
+
+hold on
+for t=1:T
+ h=plotgauss2d(est_robot_pos(:,t), est_robot_pos_cov(:,:,t));
+ set(h,'color','r')
+ h=text(est_robot_pos(1,t), est_robot_pos(2,2), sprintf('R%d', t));
+ set(h,'color','r')
+end
+hold off
+end
+
+
+if 0
+figure(3)
+if 0
+ for t=1:T
+ imagesc(inv(Vsmooth(:,:,t)))
+ colorbar
+ fprintf('t=%d; press key to continue\n', t);
+ pause
+ end
+else
+ for t=1:T
+ subplot(5,4,t)
+ imagesc(inv(Vsmooth(:,:,t)))
+ end
+end
+end
+
+
+
+
+
+%%%%%%%%%%%%%%%%%
+% DBN inference
+
+if 1
+ [bnet, Unode, Snode, Lnodes, Rnode, Ynode, Lsnode] = ...
+ mk_gmux_robot_dbn(nlandmarks, Q, R, init_x, init_V, robot_block, landmark_block);
+ engine = pearl_unrolled_dbn_inf_engine(bnet, 'max_iter', 50, 'filename', ...
+ '/home/eecs/murphyk/matlab/loopyslam.txt');
+else
+ [bnet, Unode, Snode, Lnodes, Rnode, Ynode] = ...
+ mk_gmux2_robot_dbn(nlandmarks, Q, R, init_x, init_V, robot_block, landmark_block);
+ engine = jtree_dbn_inf_engine(bnet);
+end
+
+nnodes = bnet.nnodes_per_slice;
+evidence = cell(nnodes, T);
+evidence(Ynode, :) = num2cell(obs_rel_pos, 1);
+evidence(Unode, :) = num2cell(ctrl_signal, 1);
+evidence(Snode, :) = num2cell(true_data_assoc);
+
+
+[engine, ll, niter] = enter_evidence(engine, evidence);
+niter
+
+loopy_est_robot_pos = zeros(2, T);
+for t=1:T
+ m = marginal_nodes(engine, Rnode, t);
+ loopy_est_robot_pos(:,t) = m.mu;
+end
+
+for i=1:nlandmarks
+ m = marginal_nodes(engine, Lnodes(i), T);
+ loopy_est_landmark_pos(:,i) = m.mu;
+ loopy_est_landmark_pos_cov(:,:,i) = m.Sigma;
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/paskin1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/paskin1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,238 @@
+% This is like robot1, except we only use a Kalman filter.
+% The goal is to study how the precision matrix changes.
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+if 0
+ T = 20;
+ ctrl_signal = [repmat([1 0]', 1, T/4) repmat([0 1]', 1, T/4) ...
+ repmat([-1 0]', 1, T/4) repmat([0 -1]', 1, T/4)];
+else
+ T = 60;
+ ctrl_signal = repmat([1 0]', 1, T);
+end
+
+nlandmarks = 6;
+if 0
+ true_landmark_pos = [1 1;
+ 4 1;
+ 4 4;
+ 1 4]';
+else
+ true_landmark_pos = 10*rand(2,nlandmarks);
+end
+if 0
+figure(1); clf
+hold on
+for i=1:nlandmarks
+ %text(true_landmark_pos(1,i), true_landmark_pos(2,i), sprintf('L%d',i));
+ plot(true_landmark_pos(1,i), true_landmark_pos(2,i), '*')
+end
+hold off
+end
+
+init_robot_pos = [0 0]';
+
+true_robot_pos = zeros(2, T);
+true_data_assoc = zeros(1, T);
+true_rel_dist = zeros(2, T);
+for t=1:T
+ if t>1
+ true_robot_pos(:,t) = true_robot_pos(:,t-1) + ctrl_signal(:,t);
+ else
+ true_robot_pos(:,t) = init_robot_pos + ctrl_signal(:,t);
+ end
+ nn = argmin(dist2(true_robot_pos(:,t)', true_landmark_pos'));
+ %true_data_assoc(t) = nn;
+ %true_data_assoc = wrap(t, nlandmarks); % observe 1, 2, 3, 4, 1, 2, ...
+ true_data_assoc = sample_discrete(normalise(ones(1,nlandmarks)),1,T);
+ true_rel_dist(:,t) = true_landmark_pos(:, nn) - true_robot_pos(:,t);
+end
+
+R = 1e-3*eye(2); % noise added to observation
+Q = 1e-3*eye(2); % noise added to robot motion
+
+% Create data set
+obs_noise_seq = sample_gaussian([0 0]', R, T)';
+obs_rel_pos = true_rel_dist + obs_noise_seq;
+%obs_rel_pos = true_rel_dist;
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Create params for inference
+
+% X(t) = A X(t-1) + B U(t) + noise(Q)
+
+% [L1] = [1 ] * [L1] + [0] * Ut + [0 ]
+% [L2] [ 1 ] [L2] [0] [ 0 ]
+% [R ]t [ 1] [R ]t-1 [1] [ Q]
+
+% Y(t)|S(t)=s = C(s) X(t) + noise(R)
+% Yt|St=1 = [1 0 -1] * [L1] + R
+% [L2]
+% [R ]
+
+% Create indices into block structure
+bs = 2*ones(1, nlandmarks+1); % sizes of blocks in state space
+robot_block = block(nlandmarks+1, bs);
+for i=1:nlandmarks
+ landmark_block(:,i) = block(i, bs)';
+end
+Xsz = 2*(nlandmarks+1); % 2 values for each landmark plus robot
+Ysz = 2; % observe relative location
+Usz = 2; % input is (dx, dy)
+
+
+% create block-diagonal trans matrix for each switch
+A = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ A(bi, bi) = eye(2);
+end
+bi = robot_block;
+A(bi, bi) = eye(2);
+A = repmat(A, [1 1 nlandmarks]); % same for all switch values
+
+% create block-diagonal system cov
+
+
+Qbig = zeros(Xsz, Xsz);
+bi = robot_block;
+Qbig(bi,bi) = Q; % only add noise to robot motion
+Qbig = repmat(Qbig, [1 1 nlandmarks]);
+
+% create input matrix
+B = zeros(Xsz, Usz);
+B(robot_block,:) = eye(2); % only add input to robot position
+B = repmat(B, [1 1 nlandmarks]);
+
+% create observation matrix for each value of the switch node
+% C(:,:,i) = (0 ... I ... -I) where the I is in the i'th posn.
+% This computes L(i) - R
+C = zeros(Ysz, Xsz, nlandmarks);
+for i=1:nlandmarks
+ C(:, landmark_block(:,i), i) = eye(2);
+ C(:, robot_block, i) = -eye(2);
+end
+
+% create observation cov for each value of the switch node
+Rbig = repmat(R, [1 1 nlandmarks]);
+
+% initial conditions
+init_x = zeros(Xsz, 1);
+init_v = zeros(Xsz, Xsz);
+bi = robot_block;
+init_x(bi) = init_robot_pos;
+%init_V(bi, bi) = 1e-5*eye(2); % very sure of robot posn
+init_V(bi, bi) = Q; % simualate uncertainty due to 1 motion step
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ init_V(bi,bi)= 1e5*eye(2); % very uncertain of landmark psosns
+ %init_x(bi) = true_landmark_pos(:,i);
+ %init_V(bi,bi)= 1e-5*eye(2); % very sure of landmark psosns
+end
+
+%k = nlandmarks-1; % exact
+k = 3;
+ndx = {};
+for t=1:T
+ landmarks = unique(true_data_assoc(t:-1:max(t-k,1)));
+ tmp = [landmark_block(:, landmarks) robot_block'];
+ ndx{t} = tmp(:);
+end
+
+[xa, Va] = kalman_filter(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B, ...
+ 'ndx', ndx);
+
+[xe, Ve] = kalman_filter(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B);
+
+
+if 0
+est_robot_pos = x(robot_block, :);
+est_robot_pos_cov = V(robot_block, robot_block, :);
+
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ est_landmark_pos(:,i) = x(bi, T);
+ est_landmark_pos_cov(:,:,i) = V(bi, bi, T);
+end
+end
+
+
+
+nrows = 10;
+stepsize = T/(2*nrows);
+ts = 1:stepsize:T;
+
+if 1 % plot
+
+clim = [0 max(max(Va(:,:,end)))];
+
+figure(2)
+if 0
+ imagesc(Ve(1:2:end,1:2:end, T))
+ clim = get(gca,'clim');
+else
+ i = 1;
+ for t=ts(:)'
+ subplot(nrows,2,i)
+ i = i + 1;
+ imagesc(Ve(1:2:end,1:2:end, t))
+ set(gca, 'clim', clim)
+ colorbar
+ end
+end
+suptitle('exact')
+
+
+figure(3)
+if 0
+ imagesc(Va(1:2:end,1:2:end, T))
+ set(gca,'clim', clim)
+else
+ i = 1;
+ for t=ts(:)'
+ subplot(nrows,2,i)
+ i = i+1;
+ imagesc(Va(1:2:end,1:2:end, t))
+ set(gca, 'clim', clim)
+ colorbar
+ end
+end
+suptitle('approx')
+
+
+figure(4)
+i = 1;
+for t=ts(:)'
+ subplot(nrows,2,i)
+ i = i+1;
+ Vd = Va(1:2:end,1:2:end, t) - Ve(1:2:end,1:2:end,t);
+ imagesc(Vd)
+ set(gca, 'clim', clim)
+ colorbar
+end
+suptitle('diff')
+
+end % all plot
+
+
+for t=1:T
+ i = 1:2*nlandmarks;
+ denom = Ve(i,i,t) + (Ve(i,i,t)==0);
+ Vd =(Va(i,i,t)-Ve(i,i,t)) ./ denom;
+ Verr(t) = max(Vd(:));
+end
+figure(6); plot(Verr)
+title('max relative Verr')
+
+for t=1:T
+ %err(t)=rms(xa(:,t), xe(:,t));
+ err(t)=rms(xa(1:end-2,t), xe(1:end-2,t)); % exclude robot
+end
+figure(5);plot(err)
+title('rms mean pos')
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/skf_data_assoc_gmux2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/skf_data_assoc_gmux2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,155 @@
+% This is like skf_data_assoc_gmux, except the objects don't move.
+% We are uncertain of their initial positions, and get more and more observations
+% over time. The goal is to test deterministic links (0 covariance).
+% This is like robot1, except the robot doesn't move and is always at [0 0],
+% so the relative location is simply L(s).
+
+nobj = 2;
+N = nobj+2;
+Xs = 1:nobj;
+S = nobj+1;
+Y = nobj+2;
+
+intra = zeros(N,N);
+inter = zeros(N,N);
+intra([Xs S], Y) =1;
+for i=1:nobj
+ inter(Xs(i), Xs(i))=1;
+end
+
+Xsz = 2; % state space = (x y)
+Ysz = 2;
+ns = zeros(1,N);
+ns(Xs) = Xsz;
+ns(Y) = Ysz;
+ns(S) = nobj;
+
+bnet = mk_dbn(intra, inter, ns, 'discrete', S, 'observed', [S Y]);
+
+% For each object, we have
+% X(t+1) = F X(t) + noise(Q)
+% Y(t) = H X(t) + noise(R)
+F = eye(2);
+H = eye(2);
+Q = 0*eye(Xsz); % no noise in dynamics
+R = eye(Ysz);
+
+init_state{1} = [10 10]';
+init_state{2} = [10 -10]';
+init_cov = eye(2);
+
+% Uncertain of initial state (position)
+for i=1:nobj
+ bnet.CPD{Xs(i)} = gaussian_CPD(bnet, Xs(i), 'mean', init_state{i}, 'cov', init_cov);
+end
+bnet.CPD{S} = root_CPD(bnet, S); % always observed
+bnet.CPD{Y} = gmux_CPD(bnet, Y, 'cov', repmat(R, [1 1 nobj]), 'weights', repmat(H, [1 1 nobj]));
+% slice 2
+eclass = bnet.equiv_class;
+for i=1:nobj
+ bnet.CPD{eclass(Xs(i), 2)} = gaussian_CPD(bnet, Xs(i)+N, 'mean', zeros(Xsz,1), 'cov', Q, 'weights', F);
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Create LDS params
+
+% X(t) = A X(t-1) + B U(t) + noise(Q)
+
+% [L11] = [1 ] * [L1] + [Q ]
+% [L2] [ 1] [L2] [ Q]
+
+% Y(t)|S(t)=s = C(s) X(t) + noise(R)
+% Yt|St=1 = [1 0] * [L1] + R
+% [L2]
+
+nlandmarks = nobj;
+
+% Create indices into block structure
+bs = 2*ones(1, nobj); % sizes of blocks in state space
+for i=1:nlandmarks
+ landmark_block(:,i) = block(i, bs)';
+end
+Xsz = 2*(nlandmarks); % 2 values for each landmark plus robot
+Ysz = 2; % observe relative location
+
+% create block-diagonal trans matrix for each switch
+A = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ A(bi, bi) = eye(2);
+end
+A = repmat(A, [1 1 nlandmarks]); % same for all switch values
+
+% create block-diagonal system cov
+Qbig = zeros(Xsz, Xsz);
+Qbig = repmat(Qbig, [1 1 nlandmarks]);
+
+
+% create observation matrix for each value of the switch node
+% C(:,:,i) = (0 ... I ...) where the I is in the i'th posn.
+C = zeros(Ysz, Xsz, nlandmarks);
+for i=1:nlandmarks
+ C(:, landmark_block(:,i), i) = eye(2);
+end
+
+% create observation cov for each value of the switch node
+Rbig = repmat(R, [1 1 nlandmarks]);
+
+% initial conditions
+init_x = [init_state{1}; init_state{2}];
+init_V = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ init_V(bi,bi) = init_cov;
+end
+
+
+
+%%%%%%%%%%%%%%%%
+% Observe objects at random
+T = 10;
+evidence = cell(N, T);
+data_assoc = sample_discrete(normalise(ones(1,nobj)), 1, T);
+evidence(S,:) = num2cell(data_assoc);
+evidence = sample_dbn(bnet, 'evidence', evidence);
+
+
+% Inference
+ev = cell(N,T);
+ev(bnet.observed,:) = evidence(bnet.observed, :);
+y = cell2num(evidence(Y,:));
+
+engine = pearl_unrolled_dbn_inf_engine(bnet);
+engine = enter_evidence(engine, ev);
+
+loopy_est_pos = zeros(2, nlandmarks);
+loopy_est_pos_cov = zeros(2, 2, nlandmarks);
+for i=1:nobj
+ m = marginal_nodes(engine, Xs(i), T);
+ loopy_est_pos(:,i) = m.mu;
+ loopy_est_pos_cov(:,:,i) = m.Sigma;
+end
+
+
+[xsmooth, Vsmooth] = kalman_smoother(y, A, C, Qbig, Rbig, init_x, init_V, 'model', data_assoc);
+
+kf_est_pos = zeros(2, nlandmarks);
+kf_est_pos_cov = zeros(2, 2, nlandmarks);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ kf_est_pos(:,i) = xsmooth(bi, T);
+ kf_est_pos_cov(:,:,i) = Vsmooth(bi, bi, T);
+end
+
+
+kf_est_pos
+loopy_est_pos
+
+kf_est_pos_time = zeros(2, nlandmarks, T);
+for t=1:T
+ for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ kf_est_pos_time(:,i,t) = xsmooth(bi, t);
+ end
+end
+kf_est_pos_time % same for all t since smoothed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/slam_kf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/Old/slam_kf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,172 @@
+% This is like robot1, except we only use a Kalman filter.
+% The goal is to study how the precision matrix changes.
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+if 0
+ T = 20;
+ ctrl_signal = [repmat([1 0]', 1, T/4) repmat([0 1]', 1, T/4) ...
+ repmat([-1 0]', 1, T/4) repmat([0 -1]', 1, T/4)];
+else
+ T = 12;
+ ctrl_signal = repmat([1 0]', 1, T);
+end
+
+nlandmarks = 6;
+if 0
+ true_landmark_pos = [1 1;
+ 4 1;
+ 4 4;
+ 1 4]';
+else
+ true_landmark_pos = 10*rand(2,nlandmarks);
+end
+figure(1); clf
+hold on
+for i=1:nlandmarks
+ %text(true_landmark_pos(1,i), true_landmark_pos(2,i), sprintf('L%d',i));
+ plot(true_landmark_pos(1,i), true_landmark_pos(2,i), '*')
+end
+hold off
+
+init_robot_pos = [0 0]';
+
+true_robot_pos = zeros(2, T);
+true_data_assoc = zeros(1, T);
+true_rel_dist = zeros(2, T);
+for t=1:T
+ if t>1
+ true_robot_pos(:,t) = true_robot_pos(:,t-1) + ctrl_signal(:,t);
+ else
+ true_robot_pos(:,t) = init_robot_pos + ctrl_signal(:,t);
+ end
+ %nn = argmin(dist2(true_robot_pos(:,t)', true_landmark_pos'));
+ nn = wrap(t, nlandmarks); % observe 1, 2, 3, 4, 1, 2, ...
+ true_data_assoc(t) = nn;
+ true_rel_dist(:,t) = true_landmark_pos(:, nn) - true_robot_pos(:,t);
+end
+
+R = 1e-3*eye(2); % noise added to observation
+Q = 1e-3*eye(2); % noise added to robot motion
+
+% Create data set
+obs_noise_seq = sample_gaussian([0 0]', R, T)';
+obs_rel_pos = true_rel_dist + obs_noise_seq;
+%obs_rel_pos = true_rel_dist;
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Create params for inference
+
+% X(t) = A X(t-1) + B U(t) + noise(Q)
+
+% [L1] = [1 ] * [L1] + [0] * Ut + [0 ]
+% [L2] [ 1 ] [L2] [0] [ 0 ]
+% [R ]t [ 1] [R ]t-1 [1] [ Q]
+
+% Y(t)|S(t)=s = C(s) X(t) + noise(R)
+% Yt|St=1 = [1 0 -1] * [L1] + R
+% [L2]
+% [R ]
+
+% Create indices into block structure
+bs = 2*ones(1, nlandmarks+1); % sizes of blocks in state space
+robot_block = block(nlandmarks+1, bs);
+for i=1:nlandmarks
+ landmark_block(:,i) = block(i, bs)';
+end
+Xsz = 2*(nlandmarks+1); % 2 values for each landmark plus robot
+Ysz = 2; % observe relative location
+Usz = 2; % input is (dx, dy)
+
+
+% create block-diagonal trans matrix for each switch
+A = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ A(bi, bi) = eye(2);
+end
+bi = robot_block;
+A(bi, bi) = eye(2);
+A = repmat(A, [1 1 nlandmarks]); % same for all switch values
+
+% create block-diagonal system cov
+
+
+Qbig = zeros(Xsz, Xsz);
+bi = robot_block;
+Qbig(bi,bi) = Q; % only add noise to robot motion
+Qbig = repmat(Qbig, [1 1 nlandmarks]);
+
+% create input matrix
+B = zeros(Xsz, Usz);
+B(robot_block,:) = eye(2); % only add input to robot position
+B = repmat(B, [1 1 nlandmarks]);
+
+% create observation matrix for each value of the switch node
+% C(:,:,i) = (0 ... I ... -I) where the I is in the i'th posn.
+% This computes L(i) - R
+C = zeros(Ysz, Xsz, nlandmarks);
+for i=1:nlandmarks
+ C(:, landmark_block(:,i), i) = eye(2);
+ C(:, robot_block, i) = -eye(2);
+end
+
+% create observation cov for each value of the switch node
+Rbig = repmat(R, [1 1 nlandmarks]);
+
+% initial conditions
+init_x = zeros(Xsz, 1);
+init_v = zeros(Xsz, Xsz);
+bi = robot_block;
+init_x(bi) = init_robot_pos;
+init_V(bi, bi) = 1e-5*eye(2); % very sure of robot posn
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ init_V(bi,bi)= 1e5*eye(2); % very uncertain of landmark psosns
+ %init_x(bi) = true_landmark_pos(:,i);
+ %init_V(bi,bi)= 1e-5*eye(2); % very sure of landmark psosns
+end
+
+[xsmooth, Vsmooth] = kalman_filter(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B);
+
+est_robot_pos = xsmooth(robot_block, :);
+est_robot_pos_cov = Vsmooth(robot_block, robot_block, :);
+
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ est_landmark_pos(:,i) = xsmooth(bi, T);
+ est_landmark_pos_cov(:,:,i) = Vsmooth(bi, bi, T);
+end
+
+
+
+P = zeros(size(Vsmooth));
+for t=1:T
+ P(:,:,t) = inv(Vsmooth(:,:,t));
+end
+
+figure(1)
+for t=1:T
+ subplot(T/2,2,t)
+ imagesc(P(1:2:end,1:2:end, t))
+ colorbar
+end
+
+figure(2)
+for t=1:T
+ subplot(T/2,2,t)
+ imagesc(Vsmooth(1:2:end,1:2:end, t))
+ colorbar
+end
+
+
+
+% marginalize out robot position and then check structure
+bi = landmark_block(:);
+V = Vsmooth(bi,bi,T);
+P = inv(V);
+P(1:2:end,1:2:end)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/mk_gmux_robot_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/mk_gmux_robot_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,85 @@
+function [bnet, Unode, Snode, Lnodes, Rnode, Ynode, Lsnode] = ...
+ mk_gmux_robot_dbn(nlandmarks, Q, R, init_x, init_V, robot_block, landmark_block)
+
+% Make DBN
+
+% S
+% | L1 -------> L1'
+% | | L2 ----------> L2'
+% \ | /
+% v v v
+% Ls
+% |
+% v
+% Y
+% ^
+% |
+% R -------> R'
+% ^
+% |
+% U
+%
+%
+% S is a switch, Ls is a deterministic gmux, Y = Ls-R,
+% R(t+1) = R(t) + U(t+1), L(t+1) = L(t)
+
+
+% number nodes topologically
+Snode = 1;
+Lnodes = 2:nlandmarks+1;
+Lsnode = nlandmarks+2;
+Unode = nlandmarks+3;
+Rnode = nlandmarks+4;
+Ynode = nlandmarks+5;
+
+nnodes = nlandmarks+5;
+intra = zeros(nnodes, nnodes);
+intra([Snode Lnodes], Lsnode) =1;
+intra(Unode,Rnode)=1;
+intra([Rnode Lsnode], Ynode)=1;
+
+inter = zeros(nnodes, nnodes);
+inter(Rnode, Rnode)=1;
+for i=1:nlandmarks
+ inter(Lnodes(i), Lnodes(i))=1;
+end
+
+Lsz = 2; % (x y) posn of landmark
+Rsz = 2; % (x y) posn of robot
+Ysz = 2; % relative distance
+Usz = 2; % (dx dy) ctrl
+Ssz = nlandmarks; % can switch between any landmark
+
+ns = zeros(1,nnodes);
+ns(Snode) = Ssz;
+ns(Lnodes) = Lsz;
+ns(Lsnode) = Lsz;
+ns(Ynode) = Ysz;
+ns(Rnode) = Rsz;
+ns(Ynode) = Usz;
+ns(Unode) = Usz;
+
+bnet = mk_dbn(intra, inter, ns, 'discrete', Snode, 'observed', [Snode Ynode Unode]);
+
+
+bnet.CPD{Snode} = root_CPD(bnet, Snode); % always observed
+bnet.CPD{Unode} = root_CPD(bnet, Unode); % always observed
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ bnet.CPD{Lnodes(i)} = gaussian_CPD(bnet, Lnodes(i), 'mean', init_x(bi), 'cov', init_V(bi,bi));
+end
+bi = robot_block;
+bnet.CPD{Rnode} = gaussian_CPD(bnet, Rnode, 'mean', init_x(bi), 'cov', init_V(bi,bi), 'weights', eye(2));
+bnet.CPD{Lsnode} = gmux_CPD(bnet, Lsnode, 'cov', repmat(zeros(Lsz,Lsz), [1 1 nlandmarks]), ...
+ 'weights', repmat(eye(Lsz,Lsz), [1 1 nlandmarks]));
+W = [eye(2) -eye(2)]; % Y = Ls - R, where Ls is the lower-numbered parent
+bnet.CPD{Ynode} = gaussian_CPD(bnet, Ynode, 'mean', zeros(Ysz,1), 'cov', R, 'weights', W);
+
+% slice 2
+eclass = bnet.equiv_class;
+W = [eye(2) eye(2)]; % R(t) = R(t-1) + U(t), where R(t-1) is the lower-numbered parent
+bnet.CPD{eclass(Rnode,2)} = gaussian_CPD(bnet, Rnode+nnodes, 'mean', zeros(Rsz,1), 'cov', Q, 'weights', W);
+for i=1:nlandmarks
+ bnet.CPD{eclass(Lnodes(i), 2)} = gaussian_CPD(bnet, Lnodes(i)+nnodes, 'mean', zeros(2,1), ...
+ 'cov', zeros(2,2), 'weights', eye(2));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/mk_linear_slam.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/mk_linear_slam.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,164 @@
+function [A,B,C,Q,R,Qbig,Rbig,init_x,init_V,robot_block,landmark_block,...
+ true_landmark_pos, true_robot_pos, true_data_assoc, ...
+ obs_rel_pos, ctrl_signal] = mk_linear_slam(varargin)
+
+% We create data from a linear system for testing SLAM algorithms.
+% i.e. , new robot pos = old robot pos + ctrl_signal, which is just a displacement vector.
+% and observation = landmark_pos - robot_pos, which is just a displacement vector.
+%
+% The behavior is determined by the following optional arguments:
+%
+% 'nlandmarks' - num. landmarks
+% 'landmarks' - 'rnd' means random locations in the unit sqyare
+% 'square' means at [1 1], [4 1], [4 4] and [1 4]
+% 'T' - num steps to run
+% 'ctrl' - 'stationary' means the robot remains at [0 0],
+% 'leftright' means the robot receives a constant contol of [1 0],
+% 'square' means we navigate the robot around the square
+% 'data-assoc' - 'rnd' means we observe landmarks at random
+% 'nn' means we observe the nearest neighbor landmark
+% 'cycle' means we observe landmarks in order 1,2,.., 1, 2, ...
+
+args = varargin;
+% get mandatory params
+for i=1:2:length(args)
+ switch args{i},
+ case 'nlandmarks', nlandmarks = args{i+1};
+ case 'T', T = args{i+1};
+ end
+end
+
+% set defaults
+true_landmark_pos = rand(2,nlandmarks);
+true_data_assoc = [];
+
+% get args
+for i=1:2:length(args)
+ switch args{i},
+ case 'landmarks',
+ switch args{i+1},
+ case 'rnd', true_landmark_pos = rand(2,nlandmarks);
+ case 'square', true_landmark_pos = [1 1; 4 1; 4 4; 1 4]';
+ end
+ case 'ctrl',
+ switch args{i+1},
+ case 'stationary', ctrl_signal = repmat([0 0]', 1, T);
+ case 'leftright', ctrl_signal = repmat([1 0]', 1, T);
+ case 'square', ctrl_signal = [repmat([1 0]', 1, T/4) repmat([0 1]', 1, T/4) ...
+ repmat([-1 0]', 1, T/4) repmat([0 -1]', 1, T/4)];
+ end
+ case 'data-assoc',
+ switch args{i+1},
+ case 'rnd', true_data_assoc = sample_discrete(normalise(ones(1,nlandmarks)),1,T);
+ case 'cycle', true_data_assoc = wrap(1:T, nlandmarks);
+ end
+ end
+end
+if isempty(true_data_assoc)
+ use_nn = 1;
+else
+ use_nn = 0;
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+% generate data
+
+init_robot_pos = [0 0]';
+true_robot_pos = zeros(2, T);
+true_rel_dist = zeros(2, T);
+for t=1:T
+ if t>1
+ true_robot_pos(:,t) = true_robot_pos(:,t-1) + ctrl_signal(:,t);
+ else
+ true_robot_pos(:,t) = init_robot_pos + ctrl_signal(:,t);
+ end
+ nn = argmin(dist2(true_robot_pos(:,t)', true_landmark_pos'));
+ if use_nn
+ true_data_assoc(t) = nn;
+ end
+ true_rel_dist(:,t) = true_landmark_pos(:, nn) - true_robot_pos(:,t);
+end
+
+
+R = 1e-3*eye(2); % noise added to observation
+Q = 1e-3*eye(2); % noise added to robot motion
+
+% Create data set
+obs_noise_seq = sample_gaussian([0 0]', R, T)';
+obs_rel_pos = true_rel_dist + obs_noise_seq;
+%obs_rel_pos = true_rel_dist;
+
+%%%%%%%%%%%%%%%%%%
+% Create params
+
+
+% X(t) = A X(t-1) + B U(t) + noise(Q)
+
+% [L1] = [1 ] * [L1] + [0] * Ut + [0 ]
+% [L2] [ 1 ] [L2] [0] [ 0 ]
+% [R ]t [ 1] [R ]t-1 [1] [ Q]
+
+% Y(t)|S(t)=s = C(s) X(t) + noise(R)
+% Yt|St=1 = [1 0 -1] * [L1] + R
+% [L2]
+% [R ]
+
+% Create indices into block structure
+bs = 2*ones(1, nlandmarks+1); % sizes of blocks in state space
+robot_block = block(nlandmarks+1, bs);
+for i=1:nlandmarks
+ landmark_block(:,i) = block(i, bs)';
+end
+Xsz = 2*(nlandmarks+1); % 2 values for each landmark plus robot
+Ysz = 2; % observe relative location
+Usz = 2; % input is (dx, dy)
+
+
+% create block-diagonal trans matrix for each switch
+A = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ A(bi, bi) = eye(2);
+end
+bi = robot_block;
+A(bi, bi) = eye(2);
+A = repmat(A, [1 1 nlandmarks]); % same for all switch values
+
+% create block-diagonal system cov
+
+
+Qbig = zeros(Xsz, Xsz);
+bi = robot_block;
+Qbig(bi,bi) = Q; % only add noise to robot motion
+Qbig = repmat(Qbig, [1 1 nlandmarks]);
+
+% create input matrix
+B = zeros(Xsz, Usz);
+B(robot_block,:) = eye(2); % only add input to robot position
+B = repmat(B, [1 1 nlandmarks]);
+
+% create observation matrix for each value of the switch node
+% C(:,:,i) = (0 ... I ... -I) where the I is in the i'th posn.
+% This computes L(i) - R
+C = zeros(Ysz, Xsz, nlandmarks);
+for i=1:nlandmarks
+ C(:, landmark_block(:,i), i) = eye(2);
+ C(:, robot_block, i) = -eye(2);
+end
+
+% create observation cov for each value of the switch node
+Rbig = repmat(R, [1 1 nlandmarks]);
+
+% initial conditions
+init_x = zeros(Xsz, 1);
+init_v = zeros(Xsz, Xsz);
+bi = robot_block;
+init_x(bi) = init_robot_pos;
+%init_V(bi, bi) = 1e-5*eye(2); % very sure of robot posn
+init_V(bi, bi) = Q; % simualate uncertainty due to 1 motion step
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ init_V(bi,bi)= 1e5*eye(2); % very uncertain of landmark psosns
+ %init_x(bi) = true_landmark_pos(:,i);
+ %init_V(bi,bi)= 1e-5*eye(2); % very sure of landmark psosns
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_kf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_kf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,78 @@
+% Plot how precision matrix changes over time for KF solution
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+[A,B,C,Q,R,Qbig,Rbig,init_x,init_V,robot_block,landmark_block,...
+ true_landmark_pos, true_robot_pos, true_data_assoc, ...
+ obs_rel_pos, ctrl_signal] = mk_linear_slam(...
+ 'nlandmarks', 6, 'T', 12, 'ctrl', 'leftright', 'data-assoc', 'cycle');
+
+figure(1); clf
+hold on
+for i=1:nlandmarks
+ %text(true_landmark_pos(1,i), true_landmark_pos(2,i), sprintf('L%d',i));
+ plot(true_landmark_pos(1,i), true_landmark_pos(2,i), '*')
+end
+hold off
+
+
+[x, V] = kalman_filter(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B);
+
+est_robot_pos = x(robot_block, :);
+est_robot_pos_cov = V(robot_block, robot_block, :);
+
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ est_landmark_pos(:,i) = x(bi, T);
+ est_landmark_pos_cov(:,:,i) = V(bi, bi, T);
+end
+
+
+if 0
+figure(1); hold on
+for i=1:nlandmarks
+ h=plotgauss2d(est_landmark_pos(:,i), est_landmark_pos_cov(:,:,i));
+ set(h, 'color', 'r')
+end
+hold off
+
+hold on
+for t=1:T
+ h=plotgauss2d(est_robot_pos(:,t), est_robot_pos_cov(:,:,t));
+ set(h,'color','r')
+ h=text(est_robot_pos(1,t), est_robot_pos(2,2), sprintf('R%d', t));
+ set(h,'color','r')
+end
+hold off
+end
+
+
+P = zeros(size(V));
+for t=1:T
+ P(:,:,t) = inv(V(:,:,t));
+end
+
+if 0
+ figure(2)
+ for t=1:T
+ subplot(T/2,2,t)
+ imagesc(P(1:2:end,1:2:end, t))
+ colorbar
+ end
+else
+ figure(2)
+ for t=1:T
+ subplot(T/2,2,t)
+ imagesc(V(1:2:end,1:2:end, t))
+ colorbar
+ end
+end
+
+% marginalize out robot position and then check structure
+bi = landmark_block(:);
+V = V(bi,bi,T);
+P = inv(V);
+P(1:2:end,1:2:end)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_offline_loopy.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_offline_loopy.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+% Compare Kalman smoother with loopy
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+nlandmarks = 6;
+T = 12;
+
+[A,B,C,Q,R,Qbig,Rbig,init_x,init_V,robot_block,landmark_block,...
+ true_landmark_pos, true_robot_pos, true_data_assoc, ...
+ obs_rel_pos, ctrl_signal] = mk_linear_slam(...
+ 'nlandmarks', nlandmarks, 'T', T, 'ctrl', 'leftright', 'data-assoc', 'cycle');
+
+[xsmooth, Vsmooth] = kalman_smoother(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B);
+
+est_robot_pos = xsmooth(robot_block, :);
+est_robot_pos_cov = Vsmooth(robot_block, robot_block, :);
+
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ est_landmark_pos(:,i) = xsmooth(bi, T);
+ est_landmark_pos_cov(:,:,i) = Vsmooth(bi, bi, T);
+end
+
+
+if 1
+ [bnet, Unode, Snode, Lnodes, Rnode, Ynode, Lsnode] = ...
+ mk_gmux_robot_dbn(nlandmarks, Q, R, init_x, init_V, robot_block, landmark_block);
+ engine = pearl_unrolled_dbn_inf_engine(bnet, 'max_iter', 50, 'filename', ...
+ '/home/eecs/murphyk/matlab/loopyslam.txt');
+else
+ [bnet, Unode, Snode, Lnodes, Rnode, Ynode] = ...
+ mk_gmux2_robot_dbn(nlandmarks, Q, R, init_x, init_V, robot_block, landmark_block);
+ engine = jtree_dbn_inf_engine(bnet);
+end
+
+nnodes = bnet.nnodes_per_slice;
+evidence = cell(nnodes, T);
+evidence(Ynode, :) = num2cell(obs_rel_pos, 1);
+evidence(Unode, :) = num2cell(ctrl_signal, 1);
+evidence(Snode, :) = num2cell(true_data_assoc);
+
+[engine, ll, niter] = enter_evidence(engine, evidence);
+niter
+
+loopy_est_robot_pos = zeros(2, T);
+for t=1:T
+ m = marginal_nodes(engine, Rnode, t);
+ loopy_est_robot_pos(:,t) = m.mu;
+end
+
+for i=1:nlandmarks
+ m = marginal_nodes(engine, Lnodes(i), T);
+ loopy_est_landmark_pos(:,i) = m.mu;
+ loopy_est_landmark_pos_cov(:,:,i) = m.Sigma;
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_partial_kf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_partial_kf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,107 @@
+% See how well partial Kalman filter updates work
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+nlandmarks = 6;
+T = 12;
+
+[A,B,C,Q,R,Qbig,Rbig,init_x,init_V,robot_block,landmark_block,...
+ true_landmark_pos, true_robot_pos, true_data_assoc, ...
+ obs_rel_pos, ctrl_signal] = mk_linear_slam(...
+ 'nlandmarks', nlandmarks, 'T', T, 'ctrl', 'leftright', 'data-assoc', 'cycle');
+
+% exact
+[xe, Ve] = kalman_filter(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B);
+
+
+% approx
+%k = nlandmarks-1; % exact
+k = 3;
+ndx = {};
+for t=1:T
+ landmarks = unique(true_data_assoc(t:-1:max(t-k,1)));
+ tmp = [landmark_block(:, landmarks) robot_block'];
+ ndx{t} = tmp(:);
+end
+
+[xa, Va] = kalman_filter(obs_rel_pos, A, C, Qbig, Rbig, init_x, init_V, ...
+ 'model', true_data_assoc, 'u', ctrl_signal, 'B', B, ...
+ 'ndx', ndx);
+
+
+
+nrows = 10;
+stepsize = T/(2*nrows);
+ts = 1:stepsize:T;
+
+if 1 % plot
+
+clim = [0 max(max(Va(:,:,end)))];
+
+figure(2)
+if 0
+ imagesc(Ve(1:2:end,1:2:end, T))
+ clim = get(gca,'clim');
+else
+ i = 1;
+ for t=ts(:)'
+ subplot(nrows,2,i)
+ i = i + 1;
+ imagesc(Ve(1:2:end,1:2:end, t))
+ set(gca, 'clim', clim)
+ colorbar
+ end
+end
+suptitle('exact')
+
+
+figure(3)
+if 0
+ imagesc(Va(1:2:end,1:2:end, T))
+ set(gca,'clim', clim)
+else
+ i = 1;
+ for t=ts(:)'
+ subplot(nrows,2,i)
+ i = i+1;
+ imagesc(Va(1:2:end,1:2:end, t))
+ set(gca, 'clim', clim)
+ colorbar
+ end
+end
+suptitle('approx')
+
+
+figure(4)
+i = 1;
+for t=ts(:)'
+ subplot(nrows,2,i)
+ i = i+1;
+ Vd = Va(1:2:end,1:2:end, t) - Ve(1:2:end,1:2:end,t);
+ imagesc(Vd)
+ set(gca, 'clim', clim)
+ colorbar
+end
+suptitle('diff')
+
+end % all plot
+
+
+for t=1:T
+ %err(t)=rms(xa(:,t), xe(:,t));
+ err(t)=rms(xa(1:end-2,t), xe(1:end-2,t)); % exclude robot
+end
+figure(5);plot(err)
+title('rms mean pos')
+
+
+for t=1:T
+ i = 1:2*nlandmarks;
+ denom = Ve(i,i,t) + (Ve(i,i,t)==0);
+ Vd =(Va(i,i,t)-Ve(i,i,t)) ./ denom;
+ Verr(t) = max(Vd(:));
+end
+figure(6); plot(Verr)
+title('max relative Verr')
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_stationary_loopy.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/SLAM/slam_stationary_loopy.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,155 @@
+% This is like skf_data_assoc_gmux, except the objects don't move.
+% We are uncertain of their initial positions, and get more and more observations
+% over time. The goal is to test deterministic links (0 covariance).
+% This is like robot1, except the robot doesn't move and is always at [0 0],
+% so the relative location is simply L(s).
+
+nobj = 2;
+N = nobj+2;
+Xs = 1:nobj;
+S = nobj+1;
+Y = nobj+2;
+
+intra = zeros(N,N);
+inter = zeros(N,N);
+intra([Xs S], Y) =1;
+for i=1:nobj
+ inter(Xs(i), Xs(i))=1;
+end
+
+Xsz = 2; % state space = (x y)
+Ysz = 2;
+ns = zeros(1,N);
+ns(Xs) = Xsz;
+ns(Y) = Ysz;
+ns(S) = nobj;
+
+bnet = mk_dbn(intra, inter, ns, 'discrete', S, 'observed', [S Y]);
+
+% For each object, we have
+% X(t+1) = F X(t) + noise(Q)
+% Y(t) = H X(t) + noise(R)
+F = eye(2);
+H = eye(2);
+Q = 0*eye(Xsz); % no noise in dynamics
+R = eye(Ysz);
+
+init_state{1} = [10 10]';
+init_state{2} = [10 -10]';
+init_cov = eye(2);
+
+% Uncertain of initial state (position)
+for i=1:nobj
+ bnet.CPD{Xs(i)} = gaussian_CPD(bnet, Xs(i), 'mean', init_state{i}, 'cov', init_cov);
+end
+bnet.CPD{S} = root_CPD(bnet, S); % always observed
+bnet.CPD{Y} = gmux_CPD(bnet, Y, 'cov', repmat(R, [1 1 nobj]), 'weights', repmat(H, [1 1 nobj]));
+% slice 2
+eclass = bnet.equiv_class;
+for i=1:nobj
+ bnet.CPD{eclass(Xs(i), 2)} = gaussian_CPD(bnet, Xs(i)+N, 'mean', zeros(Xsz,1), 'cov', Q, 'weights', F);
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Create LDS params
+
+% X(t) = A X(t-1) + B U(t) + noise(Q)
+
+% [L11] = [1 ] * [L1] + [Q ]
+% [L2] [ 1] [L2] [ Q]
+
+% Y(t)|S(t)=s = C(s) X(t) + noise(R)
+% Yt|St=1 = [1 0] * [L1] + R
+% [L2]
+
+nlandmarks = nobj;
+
+% Create indices into block structure
+bs = 2*ones(1, nobj); % sizes of blocks in state space
+for i=1:nlandmarks
+ landmark_block(:,i) = block(i, bs)';
+end
+Xsz = 2*(nlandmarks); % 2 values for each landmark plus robot
+Ysz = 2; % observe relative location
+
+% create block-diagonal trans matrix for each switch
+A = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ A(bi, bi) = eye(2);
+end
+A = repmat(A, [1 1 nlandmarks]); % same for all switch values
+
+% create block-diagonal system cov
+Qbig = zeros(Xsz, Xsz);
+Qbig = repmat(Qbig, [1 1 nlandmarks]);
+
+
+% create observation matrix for each value of the switch node
+% C(:,:,i) = (0 ... I ...) where the I is in the i'th posn.
+C = zeros(Ysz, Xsz, nlandmarks);
+for i=1:nlandmarks
+ C(:, landmark_block(:,i), i) = eye(2);
+end
+
+% create observation cov for each value of the switch node
+Rbig = repmat(R, [1 1 nlandmarks]);
+
+% initial conditions
+init_x = [init_state{1}; init_state{2}];
+init_V = zeros(Xsz, Xsz);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ init_V(bi,bi) = init_cov;
+end
+
+
+
+%%%%%%%%%%%%%%%%
+% Observe objects at random
+T = 10;
+evidence = cell(N, T);
+data_assoc = sample_discrete(normalise(ones(1,nobj)), 1, T);
+evidence(S,:) = num2cell(data_assoc);
+evidence = sample_dbn(bnet, 'evidence', evidence);
+
+
+% Inference
+ev = cell(N,T);
+ev(bnet.observed,:) = evidence(bnet.observed, :);
+y = cell2num(evidence(Y,:));
+
+engine = pearl_unrolled_dbn_inf_engine(bnet);
+engine = enter_evidence(engine, ev);
+
+loopy_est_pos = zeros(2, nlandmarks);
+loopy_est_pos_cov = zeros(2, 2, nlandmarks);
+for i=1:nobj
+ m = marginal_nodes(engine, Xs(i), T);
+ loopy_est_pos(:,i) = m.mu;
+ loopy_est_pos_cov(:,:,i) = m.Sigma;
+end
+
+
+[xsmooth, Vsmooth] = kalman_smoother(y, A, C, Qbig, Rbig, init_x, init_V, 'model', data_assoc);
+
+kf_est_pos = zeros(2, nlandmarks);
+kf_est_pos_cov = zeros(2, 2, nlandmarks);
+for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ kf_est_pos(:,i) = xsmooth(bi, T);
+ kf_est_pos_cov(:,:,i) = Vsmooth(bi, bi, T);
+end
+
+
+kf_est_pos
+loopy_est_pos
+
+kf_est_pos_time = zeros(2, nlandmarks, T);
+for t=1:T
+ for i=1:nlandmarks
+ bi = landmark_block(:,i);
+ kf_est_pos_time(:,i,t) = xsmooth(bi, t);
+ end
+end
+kf_est_pos_time % same for all t since smoothed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/arhmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/arhmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+% Make an HMM with autoregressive Gaussian observations (switching AR model)
+% X1 -> X2
+% | |
+% v v
+% Y1 -> Y2
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+intra = zeros(2);
+intra(1,2) = 1;
+inter = zeros(2);
+inter(1,1) = 1;
+inter(2,2) = 1;
+n = 2;
+
+Q = 2; % num hidden states
+O = 2; % size of observed vector
+
+ns = [Q O];
+dnodes = 1;
+onodes = [2];
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'observed', onodes);
+
+bnet.CPD{1} = tabular_CPD(bnet, 1);
+bnet.CPD{2} = gaussian_CPD(bnet, 2);
+bnet.CPD{3} = tabular_CPD(bnet, 3);
+bnet.CPD{4} = gaussian_CPD(bnet, 4);
+
+
+T = 10; % fixed length sequences
+
+engine = {};
+%engine{end+1} = hmm_inf_engine(bnet);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+%engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+%engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+
+inf_time = cmp_inference_dbn(bnet, engine, T, 'check_ll',1);
+learning_time = cmp_learning_dbn(bnet, engine, T, 'check_ll', 1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/bat1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/bat1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+% Compare the speeds of various inference engines on the BAT DBN
+[bnet, names] = mk_bat_dbn;
+
+T = 3; % fixed length sequence - we make it short just for speed
+
+USEC = exist('@jtree_C_inf_engine/collect_evidence','file');
+
+disp('constructing engines for BAT');
+engine = {}; % time in seconds for inference
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T, 'useC', USEC); % 0.39
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet)); % 4.89
+engine{end+1} = jtree_dbn_inf_engine(bnet); % 4.45
+if 0
+engine{end+1} = jtree_ndx_dbn_inf_engine(bnet, 'ndx_type', 'SD'); % 2.98
+engine{end+1} = jtree_ndx_dbn_inf_engine(bnet, 'ndx_type', 'D'); % 3.52
+engine{end+1} = jtree_ndx_dbn_inf_engine(bnet, 'ndx_type', 'B'); % 2.40
+if USEC, engine{end+1} = jtree_C_dbn_inf_engine(bnet); end % 3.54
+%engine{end+1} = hmm_inf_engine(bnet, onodes); % too big
+end
+
+%tic; engine{end+1} = frontier_inf_engine(bnet); toc % very slow
+% The frontier engine thrashes badly on the BAT network
+%tic; engine{end+1} = bk_inf_engine(bnet, 'exact', onodes); toc % SLOW!
+
+%tic; engine{end+1} = bk_inf_engine(bnet, 'ff', onodes); toc
+
+%clusters{1} = [stringmatch({'LeftClr', 'RightClr', 'LatAct', 'Xdot', 'InLane'}, names)];
+%clusters{2} = [stringmatch({'FwdAct', 'Ydot', 'Stopped', 'EngStatus', 'FBStatus'}, names)];
+
+%tic; engine{end+1} = bk_inf_engine(bnet, clusters, onodes); toc
+
+disp('inference')
+time = cmp_inference_dbn(bnet, engine, T)
+
+disp('learning')
+time = cmp_learning_dbn(bnet, engine, T)
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/bkff1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/bkff1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+% Compare different implementations of fully factored Boyen Koller
+
+water = 1;
+if water
+ bnet = mk_water_dbn;
+else
+ N = 5;
+ Q = 2;
+ Y = 2;
+ bnet = mk_chmm(N, Q, Y);
+end
+ss = length(bnet.intra);
+
+engine = {};
+engine{end+1} = bk_inf_engine(bnet, 'clusters', 'ff');
+engine{end+1} = bk_ff_hmm_inf_engine(bnet);
+E = length(engine);
+
+T = 5;
+time = cmp_inference_dbn(bnet, engine, T, 'singletons_only', 1)
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/chmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/chmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+% Compare the speeds of various inference engines on a coupled HMM
+
+N = 3;
+Q = 2;
+rand('state', 0);
+randn('state', 0);
+discrete = 0;
+if discrete
+ Y = 2; % size of output alphabet
+else
+ Y = 3; % size of observed vectors
+end
+coupled = 1;
+bnet = mk_chmm(N, Q, Y, discrete, coupled);
+%bnet = mk_fhmm(N, Q, Y, discrete); % factorial HMM
+ss = length(bnet.node_sizes_slice);
+
+T = 3;
+
+USEC = exist('@jtree_C_inf_engine/collect_evidence','file');
+
+engine = {};
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+%engine{end+1} = jtree_ndx_dbn_inf_engine(bnet, 'ndx_type', 'SD');
+%engine{end+1} = jtree_ndx_dbn_inf_engine(bnet, 'ndx_type', 'D');
+%engine{end+1} = jtree_ndx_dbn_inf_engine(bnet, 'ndx_type', 'B');
+if USEC, engine{end+1} = jtree_C_dbn_inf_engine(bnet); end
+engine{end+1} = hmm_inf_engine(bnet);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+
+% times in matlab N=4 Q=4 T=5 (* = winner)
+% jtree SD B hmm dhmm unrolled
+% 0.6266 1.1563 8.3815 0.3069 0.1948* 0.8654 inf
+% 0.9057* 2.1522 12.6314 2.6847 2.3107 3.1905 learn
+
+%engine{end+1} = bk_inf_engine(bnet, 'ff', onodes);
+%engine{end+1} = pearl_unrolled_dbn_inf_engine(bnet, T);
+
+inf_time = cmp_inference_dbn(bnet, engine, T)
+learning_time = cmp_learning_dbn(bnet, engine, T)
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/cmp_inference_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/cmp_inference_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,100 @@
+function [time, engine] = cmp_inference_dbn(bnet, engine, T, varargin)
+% CMP_INFERENCE_DBN Compare several inference engines on a DBN
+% function [time, engine] = cmp_inference_dbn(bnet, engine, T, ...)
+%
+% engine{i} is the i'th inference engine.
+% time(e) = elapsed time for doing inference with engine e
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% exact - specifies which engines do exact inference [ 1:length(engine) ]
+% singletons_only - if 1, we only call marginal_nodes, else this and marginal_family [0]
+% check_ll - 1 means we check that the log-likelihoods are correct [1]
+
+% set default params
+exact = 1:length(engine);
+singletons_only = 0;
+check_ll = 1;
+onodes = bnet.observed;
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'exact', exact = args{i+1};
+ case 'singletons_only', singletons_only = args{i+1};
+ case 'check_ll', check_ll = args{i+1};
+ case 'observed', onodes = args{i+1};
+ otherwise,
+ error(['unrecognized argument ' args{i}])
+ end
+end
+
+E = length(engine);
+ref = exact(1); % reference
+
+ss = length(bnet.intra);
+ev = sample_dbn(bnet, 'length', T);
+evidence = cell(ss,T);
+evidence(onodes,:) = ev(onodes, :);
+
+for i=1:E
+ tic;
+ [engine{i}, ll(i)] = enter_evidence(engine{i}, evidence);
+ time(i)=toc;
+ fprintf('engine %d took %6.4f seconds\n', i, time(i));
+end
+
+cmp = mysetdiff(exact, ref);
+if check_ll
+ for i=cmp(:)'
+ if ~approxeq(ll(ref), ll(i))
+ error(['engine ' num2str(i) ' has wrong ll'])
+ end
+ end
+end
+ll
+
+hnodes = mysetdiff(1:ss, onodes);
+
+if ~singletons_only
+ get_marginals(engine, hnodes, exact, 0, T);
+end
+get_marginals(engine, hnodes, exact, 1, T);
+
+%%%%%%%%%%
+
+function get_marginals(engine, hnodes, exact, singletons, T)
+
+bnet = bnet_from_engine(engine{1});
+N = length(bnet.intra);
+cnodes_bitv = zeros(1,N);
+cnodes_bitv(bnet.cnodes) = 1;
+ref = exact(1); % reference
+cmp = exact(2:end);
+E = length(engine);
+m = cell(1,E);
+
+for t=1:T
+ for n=1:N
+ %for n=hnodes(:)'
+ for e=1:E
+ if singletons
+ m{e} = marginal_nodes(engine{e}, n, t);
+ else
+ m{e} = marginal_family(engine{e}, n, t);
+ end
+ end
+ for e=cmp(:)'
+ assert(isequal(m{e}.domain, m{ref}.domain));
+ if cnodes_bitv(n) & isfield(m{e}, 'mu') & isfield(m{ref}, 'mu')
+ wrong = ~approxeq(m{ref}.mu, m{e}.mu) | ~approxeq(m{ref}.Sigma, m{e}.Sigma);
+ else
+ wrong = ~approxeq(m{ref}.T(:), m{e}.T(:));
+ end
+ if wrong
+ error(sprintf('engine %d is wrong; n=%d, t=%d, fam=%d', e, n, t, ~singletons))
+ end
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/cmp_learning_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/cmp_learning_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,89 @@
+function [time, CPD, LL, cases] = cmp_learning_dbn(bnet, engine, T, varargin)
+% CMP_LEARNING_DBN Compare a bunch of inference engines by learning a DBN
+% function [time, CPD, LL, cases] = cmp_learning_dbn(bnet, engine, exact, T, ncases, max_iter)
+%
+% engine{i} is the i'th inference engine.
+% time(e) = elapsed time for doing inference with engine e
+% CPD{e,c} is the learned CPD for eclass c in engine e
+% LL{e} is the learning curve for engine e
+% cases{i} is the i'th training case
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% exact - specifies which engines do exact inference [ 1:length(engine) ]
+% check_ll - 1 means we check that the log-likelihoods are correct [1]
+% ncases - num. random training cases [2]
+% max_iter - max. num EM iterations [2]
+
+% set default params
+exact = 1:length(engine);
+check_ll = 1;
+ncases = 2;
+max_iter = 2;
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'exact', exact = args{i+1};
+ case 'check_ll', check_ll = args{i+1};
+ case 'ncases', ncases = args{i+1};
+ case 'max_iter', max_iter = args{i+1};
+ otherwise,
+ error(['unrecognized argument ' args{i}])
+ end
+end
+
+E = length(engine);
+ss = length(bnet.intra);
+onodes = bnet.observed;
+
+cases = cell(1, ncases);
+for i=1:ncases
+ ev = sample_dbn(bnet, 'length', T);
+ cases{i} = cell(ss,T);
+ cases{i}(onodes,:) = ev(onodes, :);
+end
+
+LL = cell(1,E);
+time = zeros(1,E);
+for i=1:E
+ tic
+ [bnet2{i}, LL{i}] = learn_params_dbn_em(engine{i}, cases, 'max_iter', max_iter);
+ time(i) = toc;
+ fprintf('engine %d took %6.4f seconds\n', i, time(i));
+end
+
+ref = exact(1); % reference
+cmp = mysetdiff(exact, ref);
+if check_ll
+ for i=cmp(:)'
+ if ~approxeq(LL{ref}, LL{i})
+ error(['engine ' num2str(i) ' has wrong ll'])
+ end
+ end
+end
+
+nCPDs = length(bnet.CPD);
+CPD = cell(E, nCPDs);
+tabular = zeros(1, nCPDs);
+for i=1:E
+ temp = bnet2{i};
+ for c=1:nCPDs
+ tabular(c) = isa(temp.CPD{c}, 'tabular_CPD');
+ CPD{i,c} = struct(temp.CPD{c});
+ end
+end
+
+for i=cmp(:)'
+ for c=1:nCPDs
+ if tabular(c)
+ assert(approxeq(CPD{i,c}.CPT, CPD{ref,c}.CPT));
+ else
+ assert(approxeq(CPD{i,c}.mean, CPD{ref,c}.mean));
+ assert(approxeq(CPD{i,c}.cov, CPD{ref,c}.cov));
+ assert(approxeq(CPD{i,c}.weights, CPD{ref,c}.weights));
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/cmp_online_inference.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/cmp_online_inference.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,97 @@
+function [time, engine] = cmp_online_inference(bnet, engine, T, varargin)
+% CMP_ONLINE_INFERENCE Compare several online inference engines on a DBN
+% function [time, engine] = cmp_online_inference(bnet, engine, T, ...)
+%
+% engine{i} is the i'th inference engine.
+% time(e) = elapsed time for doing inference with engine e
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% exact - specifies which engines do exact inference [ 1:length(engine) ]
+% singletons_only - if 1, we only call marginal_nodes, else this and marginal_family [0]
+% check_ll - 1 means we check that the log-likelihoods are correct [1]
+
+% set default params
+exact = 1:length(engine);
+singletons_only = 0;
+check_ll = 1;
+onodes = bnet.observed;
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'exact', exact = args{i+1};
+ case 'singletons_only', singletons_only = args{i+1};
+ case 'check_ll', check_ll = args{i+1};
+ case 'observed', onodes = args{i+1};
+ otherwise,
+ error(['unrecognized argument ' args{i}])
+ end
+end
+
+E = length(engine);
+ref = exact(1); % reference
+cmp = mysetdiff(exact, ref);
+
+ss = length(bnet.intra);
+hnodes = mysetdiff(1:ss, onodes);
+ev = sample_dbn(bnet, 'length', T);
+evidence = cell(ss,T);
+evidence(onodes,:) = ev(onodes, :);
+
+time = zeros(1,E);
+for t=1:T
+ for e=1:E
+ tic;
+ [engine{e}, ll(e)] = enter_evidence(engine{e}, evidence(:,t), t);
+ time(e)= time(e) + toc;
+ end
+ if check_ll
+ for e=cmp(:)'
+ if ~approxeq(ll(ref), ll(e))
+ error(['engine ' num2str(e) ' has wrong ll'])
+ end
+ end
+ end
+ if ~singletons_only
+ check_marginals(engine, hnodes, exact, 0, t);
+ end
+ check_marginals(engine, hnodes, exact, 1, t);
+end
+
+
+%%%%%%%%%%
+
+function check_marginals(engine, hnodes, exact, singletons, t)
+
+bnet = bnet_from_engine(engine{1});
+N = length(bnet.intra);
+cnodes_bitv = zeros(1,N);
+cnodes_bitv(bnet.cnodes) = 1;
+ref = exact(1); % reference
+cmp = exact(2:end);
+E = length(engine);
+m = cell(1,E);
+
+for n=1:N
+ %for n=hnodes(:)'
+ for e=1:E
+ if singletons
+ m{e} = marginal_nodes(engine{e}, n, t);
+ else
+ m{e} = marginal_family(engine{e}, n, t);
+ end
+ end
+ for e=cmp(:)'
+ assert(isequal(m{e}.domain, m{ref}.domain));
+ if cnodes_bitv(n) & isfield(m{e}, 'mu') & isfield(m{ref}, 'mu')
+ wrong = ~approxeq(m{ref}.mu, m{e}.mu) | ~approxeq(m{ref}.Sigma, m{e}.Sigma);
+ else
+ wrong = ~approxeq(m{ref}.T(:), m{e}.T(:));
+ end
+ if wrong
+ error(sprintf('engine %d is wrong; n=%d, t=%d, fam=%d', e, n, t, ~singletons))
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/dhmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/dhmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+% Make an HMM with discrete observations
+% X1 -> X2
+% | |
+% v v
+% Y1 Y2
+
+intra = zeros(2);
+intra(1,2) = 1;
+inter = zeros(2);
+inter(1,1) = 1;
+n = 2;
+
+Q = 2; % num hidden states
+O = 2; % num observable symbols
+
+ns = [Q O];
+dnodes = 1:2;
+onodes = [2];
+eclass1 = [1 2];
+eclass2 = [3 2];
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+
+rand('state', 0);
+prior1 = normalise(rand(Q,1));
+transmat1 = mk_stochastic(rand(Q,Q));
+obsmat1 = mk_stochastic(rand(Q,O));
+bnet.CPD{1} = tabular_CPD(bnet, 1, prior1);
+bnet.CPD{2} = tabular_CPD(bnet, 2, obsmat1);
+bnet.CPD{3} = tabular_CPD(bnet, 3, transmat1);
+
+
+T = 5; % fixed length sequences
+
+engine = {};
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+engine{end+1} = hmm_inf_engine(bnet);
+engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+if 1
+%engine{end+1} = frontier_inf_engine(bnet); % broken
+engine{end+1} = bk_inf_engine(bnet, 'clusters', {[1]});
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+end
+
+inf_time = cmp_inference_dbn(bnet, engine, T);
+
+ncases = 2;
+max_iter = 2;
+[learning_time, CPD, LL, cases] = cmp_learning_dbn(bnet, engine, T, 'ncases', ncases, 'max_iter', max_iter);
+
+% Compare to HMM toolbox
+
+data = zeros(ncases, T);
+for i=1:ncases
+ %data(i,:) = cat(2, cases{i}{onodes,:});
+ data(i,:) = cell2num(cases{i}(onodes,:));
+end
+[LL2, prior2, transmat2, obsmat2] = dhmm_em(data, prior1, transmat1, obsmat1, 'max_iter', max_iter);
+
+e = 1;
+assert(approxeq(prior2, CPD{e,1}.CPT))
+assert(approxeq(obsmat2, CPD{e,2}.CPT))
+assert(approxeq(transmat2, CPD{e,3}.CPT))
+assert(approxeq(LL2, LL{e}))
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/ehmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/ehmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+% make the structure of an embedded HMM with 2 rows and 3 columns
+
+% 1------------>2
+% |\ \ | \ \
+% 3->4->5 6->7->8
+
+n = 8;
+dag = zeros(n);
+dag(1,[2 3 4 5])=1;
+dag(2,[6 7 8])=1;
+for i=3:4
+ dag(i,i+1)=1;
+end
+for i=6:7
+ dag(i,i+1)=1;
+end
+ns = 2*ones(1,n);
+bnet = mk_bnet(dag,ns);
+for i=1:n
+ bnet.CPD{i}=tabular_CPD(bnet,i);
+end
+[jtree, root, cliques] = graph_to_jtree(moralize(bnet.dag), ones(1,n), {}, {});
+%[jtree, root, cliques, B, w, elim_order, moral_edges, fill_in_edges] = dag_to_jtree(bnet);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/fhmm_infer.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/fhmm_infer.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,324 @@
+function [loglik, gamma] = fhmm_infer(inter, CPTs_slice1, CPTs, obsmat, node_sizes)
+% FHMM_INFER Exact inference for a factorial HMM.
+% [loglik, gamma] = fhmm_infer(inter, CPTs_slice1, CPTs, obsmat, node_sizes)
+%
+% Inputs:
+% inter - the inter-slice adjacency matrix
+% CPTs_slice1{s}(j) = Pr(Q(s,1) = j) where Q(s,t) = hidden node s in slice t
+% CPT{s}(i1, i2, ..., j) = Pr(Q(s,t) = j | Pa(s,t-1) = i1, i2, ...),
+% obsmat(i,t) = Pr(y(t) | Q(t)=i)
+% node_sizes is a vector with the cardinality of the hidden nodes
+%
+% Outputs:
+% gamma(i,t) = Pr(X(t)=i | O(1:T)) as in an HMM,
+% except that i is interpreted as an M digit, base-K number (if there are M chains each of cardinality K).
+%
+%
+% For M chains each of cardinality K, the frontiers (i.e., cliques)
+% contain M+1 nodes, and it takes M steps to advance the frontier by one time step,
+% so the run time is O(T M K^(M+1)).
+% An HMM takes O(T S^2) where S is the size of the state space.
+% Collapsing the FHMM to an HMM results in S = K^M.
+% For details, see
+% "The Factored Frontier Algorithm for Approximate Inference in DBNs",
+% Kevin Murphy and Yair Weiss, submitted to NIPS 2000.
+%
+% The frontier algorithm makes the following topological assumptions:
+%
+% - All nodes are persistent (connect to the next slice)
+% - No connections within a timeslice
+% - There is a single observation variable, which depends on all the hidden nodes
+% - Each node can have several parents in the previous time slice (generalizes a FHMM slightly)
+%
+
+% The forwards pass of the frontier algorithm can be explained with the following example.
+% Suppose we have 3 hidden nodes per slice, A, B, C.
+% The goal is to compute alpha(j, t) = Pr( (A_t,B_t,C_t)=j | Y(1:t))
+% We move alpha from t to t+1 one node at a time, as follows.
+% We define the following quantities:
+% s([a1 b1 c1], 1) = Prob(A(t)=a1, B(t)=b1, C(t)=c1 | Y(1:t)) = alpha(j, t)
+% s([a2 b1 c1], 2) = Prob(A(t+1)=a2, B(t)=b1, C(t)=c1 | Y(1:t))
+% s([a2 b2 c1], 3) = Prob(A(t+1)=a2, B(t+1)=b2, C(t)=c1 | Y(1:t))
+% s([a2 b2 c2], 4) = Prob(A(t+1)=a2, B(t+1)=b2, C(t+1)=c2 | Y(1:t))
+% s([a2 b2 c2], 5) = Prob(A(t+1)=a2, B(t+1)=b2, C(t+1)=c2 | Y(1:t+1)) = alpha(j, t+1)
+%
+% These can be computed recursively as follows:
+%
+% s([a2 b1 c1], 2) = sum_{a1} P(a2|a1) s([a1 b1 c1], 1)
+% s([a2 b2 c1], 3) = sum_{b1} P(b2|b1) s([a2 b1 c1], 2)
+% s([a2 b2 c2], 4) = sum_{c1} P(c2|c1) s([a2 b2 c1], 3)
+% s([a2 b2 c2], 5) = normalise( s([a2 b2 c2], 4) .* P(Y(t+1)|a2,b2,c2) )
+
+
+[kk,ll,mm] = make_frontier_indices(inter, node_sizes); % can pass in as args
+
+scaled = 1;
+
+M = length(node_sizes);
+S = prod(node_sizes);
+T = size(obsmat, 2);
+
+alpha = zeros(S, T);
+beta = zeros(S, T);
+gamma = zeros(S, T);
+scale = zeros(1,T);
+tiny = exp(-700);
+
+
+alpha(:,1) = make_prior_from_CPTs(CPTs_slice1, node_sizes);
+alpha(:,1) = alpha(:,1) .* obsmat(:, 1);
+
+if scaled
+ s = sum(alpha(:,1));
+ if s==0, s = s + tiny; end
+ scale(1) = 1/s;
+else
+ scale(1) = 1;
+end
+alpha(:,1) = alpha(:,1) * scale(1);
+
+%a = zeros(S, M+1);
+%b = zeros(S, M+1);
+anew = zeros(S,1);
+aold = zeros(S,1);
+bnew = zeros(S,1);
+bold = zeros(S,1);
+
+for t=2:T
+ %a(:,1) = alpha(:,t-1);
+ aold = alpha(:,t-1);
+
+ c = 1;
+ for i=1:M
+ ns = node_sizes(i);
+ cpt = CPTs{i};
+ for j=1:S
+ s = 0;
+ for xx=1:ns
+ %k = kk(xx,j,i);
+ %l = ll(xx,j,i);
+ k = kk(c);
+ l = ll(c);
+ c = c + 1;
+ % s = s + a(k,i) * CPTs{i}(l);
+ s = s + aold(k) * cpt(l);
+ end
+ %a(j,i+1) = s;
+ anew(j) = s;
+ end
+ aold = anew;
+ end
+
+ %alpha(:,t) = a(:,M+1) .* obsmat(:, obs(t));
+ alpha(:,t) = anew .* obsmat(:, t);
+
+ if scaled
+ s = sum(alpha(:,t));
+ if s==0, s = s + tiny; end
+ scale(t) = 1/s;
+ else
+ scale(t) = 1;
+ end
+ alpha(:,t) = alpha(:,t) * scale(t);
+
+end
+
+
+beta(:,T) = ones(S,1) * scale(T);
+for t=T-1:-1:1
+ %b(:,1) = beta(:,t+1) .* obsmat(:, obs(t+1));
+ bold = beta(:,t+1) .* obsmat(:, t+1);
+
+ c = 1;
+ for i=1:M
+ ns = node_sizes(i);
+ cpt = CPTs{i};
+ for j=1:S
+ s = 0;
+ for xx=1:ns
+ %k = kk(xx,j,i);
+ %m = mm(xx,j,i);
+ k = kk(c);
+ m = mm(c);
+ c = c + 1;
+ % s = s + b(k,i) * CPTs{i}(m);
+ s = s + bold(k) * cpt(m);
+ end
+ %b(j,i+1) = s;
+ bnew(j) = s;
+ end
+ bold = bnew;
+ end
+ % beta(:,t) = b(:,M+1) * scale(t);
+ beta(:,t) = bnew * scale(t);
+end
+
+
+if scaled
+ loglik = -sum(log(scale)); % scale(i) is finite
+else
+ lik = alpha(:,1)' * beta(:,1);
+ loglik = log(lik+tiny);
+end
+
+for t=1:T
+ gamma(:,t) = normalise(alpha(:,t) .* beta(:,t));
+end
+
+%%%%%%%%%%%
+
+function [kk,ll,mm] = make_frontier_indices(inter, node_sizes)
+%
+% Precompute indices for use in the frontier algorithm.
+% These only depend on the topology, not the parameters or data.
+% Hence we can compute them outside of fhmm_infer.
+% This saves a lot of run-time computation.
+
+M = length(node_sizes);
+S = prod(node_sizes);
+
+mns = max(node_sizes);
+kk = zeros(mns, S, M);
+ll = zeros(mns, S, M);
+mm = zeros(mns, S, M);
+
+for i=1:M
+ for j=1:S
+ u = ind2subv(node_sizes, j);
+ x = u(i);
+ for xx=1:node_sizes(i)
+ uu = u;
+ uu(i) = xx;
+ k = subv2ind(node_sizes, uu);
+ kk(xx,j,i) = k;
+ ps = find(inter(:,i)==1);
+ ps = ps(:)';
+ l = subv2ind(node_sizes([ps i]), [uu(ps) x]); % sum over parent
+ ll(xx,j,i) = l;
+ m = subv2ind(node_sizes([ps i]), [u(ps) xx]); % sum over child
+ mm(xx,j,i) = m;
+ end
+ end
+end
+
+%%%%%%%%%
+
+function prior=make_prior_from_CPTs(indiv_priors, node_sizes)
+%
+% composite_prior=make_prior(individual_priors, node_sizes)
+% Make the prior for the first node in a Markov chain
+% from the priors on each node in the equivalent DBN.
+% prior{i}(j) = Pr(X_i=j), where X_i is the i'th node in slice 1.
+% composite_prior(i) = Pr(slice1 = i).
+
+n = length(indiv_priors);
+S = prod(node_sizes);
+prior = zeros(S,1);
+for i=1:S
+ vi = ind2subv(node_sizes, i);
+ p = 1;
+ for k=1:n
+ p = p * indiv_priors{k}(vi(k));
+ end
+ prior(i) = p;
+end
+
+
+
+%%%%%%%%%%%
+
+function [loglik, alpha, beta] = FHMM_slow(inter, CPTs_slice1, CPTs, obsmat, node_sizes, data)
+%
+% Same as the above, except we don't use the optimization of computing the indices outside the loop.
+
+
+scaled = 1;
+
+M = length(node_sizes);
+S = prod(node_sizes);
+[numex T] = size(data);
+
+obs = data;
+
+alpha = zeros(S, T);
+beta = zeros(S, T);
+a = zeros(S, M+1);
+b = zeros(S, M+1);
+scale = zeros(1,T);
+
+alpha(:,1) = make_prior_from_CPTs(CPTs_slice1, node_sizes);
+alpha(:,1) = alpha(:,1) .* obsmat(:, obs(1));
+if scaled
+ s = sum(alpha(:,1));
+ if s==0, s = s + tiny; end
+ scale(1) = 1/s;
+else
+ scale(1) = 1;
+end
+alpha(:,1) = alpha(:,1) * scale(1);
+
+for t=2:T
+ fprintf(1, 't %d\n', t);
+ a(:,1) = alpha(:,t-1);
+ for i=1:M
+ for j=1:S
+ u = ind2subv(node_sizes, j);
+ xnew = u(i);
+ s = 0;
+ for xold=1:node_sizes(i)
+ uold = u;
+ uold(i) = xold;
+ k = subv2ind(node_sizes, uold);
+ ps = find(inter(:,i)==1);
+ ps = ps(:)';
+ l = subv2ind(node_sizes([ps i]), [uold(ps) xnew]);
+ s = s + a(k,i) * CPTs{i}(l);
+ end
+ a(j,i+1) = s;
+ end
+ end
+ alpha(:,t) = a(:,M+1) .* obsmat(:, obs(t));
+
+ if scaled
+ s = sum(alpha(:,t));
+ if s==0, s = s + tiny; end
+ scale(t) = 1/s;
+ else
+ scale(t) = 1;
+ end
+ alpha(:,t) = alpha(:,t) * scale(t);
+
+end
+
+
+beta(:,T) = ones(S,1) * scale(T);
+for t=T-1:-1:1
+ fprintf(1, 't %d\n', t);
+ b(:,1) = beta(:,t+1) .* obsmat(:, obs(t+1));
+ for i=1:M
+ for j=1:S
+ u = ind2subv(node_sizes, j);
+ xold = u(i);
+ s = 0;
+ for xnew=1:node_sizes(i)
+ unew = u;
+ unew(i) = xnew;
+ k = subv2ind(node_sizes, unew);
+ ps = find(inter(:,i)==1);
+ ps = ps(:)';
+ l = subv2ind(node_sizes([ps i]), [u(ps) xnew]);
+ s = s + b(k,i) * CPTs{i}(l);
+ end
+ b(j,i+1) = s;
+ end
+ end
+ beta(:,t) = b(:,M+1) * scale(t);
+end
+
+
+if scaled
+ loglik = -sum(log(scale)); % scale(i) is finite
+else
+ lik = alpha(:,1)' * beta(:,1);
+ loglik = log(lik+tiny);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/filter_test1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/filter_test1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+% Compare online filtering algorithms on some DBNs
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+if 0
+ N = 3;
+ Q = 2;
+ obs_size = 1;
+ discrete_obs = 0;
+ bnet = mk_chmm(N, Q, obs_size, discrete_obs);
+else
+ %bnet = mk_bat_dbn;
+ bnet = mk_water_dbn;
+end
+
+T = 3;
+
+engine = {};
+engine{end+1} = filter_engine(hmm_2TBN_inf_engine(bnet));
+engine{end+1} = filter_engine(jtree_2TBN_inf_engine(bnet));
+
+time = cmp_online_inference(bnet, engine, T);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/ghmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/ghmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,64 @@
+% Make an HMM with Gaussian observations
+% X1 -> X2
+% | |
+% v v
+% Y1 Y2
+
+intra = zeros(2);
+intra(1,2) = 1;
+inter = zeros(2);
+inter(1,1) = 1;
+n = 2;
+
+Q = 2; % num hidden states
+O = 2; % size of observed vector
+ns = [Q O];
+bnet = mk_dbn(intra, inter, ns, 'discrete', 1, 'observed', 2);
+
+prior0 = normalise(rand(Q,1));
+transmat0 = mk_stochastic(rand(Q,Q));
+mu0 = rand(O,Q);
+Sigma0 = repmat(eye(O), [1 1 Q]);
+bnet.CPD{1} = tabular_CPD(bnet, 1, prior0);
+%% we set the cov prior to 0 to give same results as HMM toolbox
+%bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', mu0, 'cov', Sigma0, 'cov_prior_weight', 0);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', mu0, 'cov', Sigma0);
+bnet.CPD{3} = tabular_CPD(bnet, 3, transmat0);
+
+
+T = 5; % fixed length sequences
+
+engine = {};
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+engine{end+1} = hmm_inf_engine(bnet);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+%engine{end+1} = frontier_inf_engine(bnet);
+engine{end+1} = bk_inf_engine(bnet, 'clusters', {[1]});
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+
+
+inf_time = cmp_inference_dbn(bnet, engine, T);
+
+ncases = 2;
+max_iter = 2;
+[learning_time, CPD, LL, cases] = cmp_learning_dbn(bnet, engine, T, 'ncases', ncases, 'max_iter', max_iter);
+
+% Compare to HMM toolbox
+
+data = zeros(O, T, ncases);
+for i=1:ncases
+ data(:,:,i) = cell2num(cases{i}(bnet.observed, :));
+end
+
+tic
+[LL2, prior2, transmat2, mu2, Sigma2] = mhmm_em(data, prior0, transmat0, mu0, Sigma0, [], 'max_iter', max_iter);
+t=toc;
+disp(['HMM toolbox took ' num2str(t) ' seconds '])
+
+e = 1;
+assert(approxeq(prior2, CPD{e,1}.CPT))
+assert(approxeq(mu2, CPD{e,2}.mean))
+assert(approxeq(Sigma2, CPD{e,2}.cov))
+assert(approxeq(transmat2, CPD{e,3}.CPT))
+assert(approxeq(LL2, LL{e}))
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/ho1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/ho1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,156 @@
+function ho1()
+
+% Example of how to create a higher order DBN
+% Written by Rainer Deventer 3/28/03
+
+bnet = createBNetNL();
+
+%%%%%%%%%%%%
+
+
+function bnet = createBNetNL(varargin)
+ % Generate a Bayesian network, which is able to model nonlinearities at
+% the input. The only input is the order of the dynamic system. If this
+% parameter is missing, the an order of two is assumed
+if nargin > 0
+ order = varargin{1}
+else
+ order = 2;
+end
+
+ss = 6; % For each time slice the following nodes are modeled
+ % ud(t_k) Discrete node, which decides whether saturation is reached.
+ % Node number 2
+ % uv(t_k) Visible input node with node number 2
+ % uh(t_k) Hidden input node with node number 3
+ % y(t_k) Modeled output, Number 4
+ % z(t_k) Disturbing variable, number 5
+ % q(t_k), number 6
+
+intra = zeros(ss,ss);
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Within each timeslice ud(t_k) is connected with uv(t_k) and uh(t_k) %
+% This part is used to model saturation %
+% A connection from uv(t_k) to uh(t_k) is omitted %
+% Additionally y(t_k) is connected with q(t_k). To model the disturbing%
+% value z(t_k) is connected with q(t_k). %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+intra(1,2:3) = 1; % Connections ud(t_k) -> uv(t_k) and ud(t_k) -> uh(t_k)
+intra(4:5,6) = 1; % Connections y(t_k) -> q(t_k) and z(t_k) -> q(t_k)
+
+
+
+inter = zeros(ss,ss,order);
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% The Markov assumption is not met as connections from time slice t to t+2 %
+% exist. %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+for i = 1:order
+ if i == 1
+ inter(1,1,i) = 1; %Connect the discrete nodes. This is necessary to improve
+ %the disturbing reaction
+ inter(3,4,i) = 1; %Connect uh(t_{k-1}) with y(t_k)
+ inter(4,4,i) = 1; %Connect y(t_{k-1}) with y(t_k)
+ inter(5,5,i) = 1; %Connect z(t_{k-1}) with z(t_k)
+ else
+ inter(3,4,i) = 1; %Connect uh(t_{k-i}) with y(t_k)
+ inter(4,4,i) = 1; %Connect y(t_{k-i}) with y(t_k)
+ end
+end
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Define the dimensions of the discrete nodes. Node 1 has two states %
+% 1 = lower saturation reached %
+% 2 = Upper saturation reached %
+% Values in between are model by probabilities between 0 and 1 %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+node_sizes = ones(1,ss);
+node_sizes(1) = 2;
+dnodes = [1];
+
+eclass = [1:6;7 2:3 8 9 6;7 2:3 10 11 6];
+bnet = mk_higher_order_dbn(intra,inter,node_sizes,...
+ 'discrete',dnodes,...
+ 'eclass',eclass);
+
+cov_high = 400;
+cov_low = 0.01;
+weight1 = randn(1,1);
+weight2 = randn(1,1);
+weight3 = randn(1,1);
+weight4 = randn(1,1);
+
+numOfNodes = 5 + order;
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Nodes of the first time-slice %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Discrete input node,
+bnet.CPD{1} = tabular_CPD(bnet,1,'CPT',[1/2 1/2],'adjustable',0);
+
+
+% Modeled visible input
+bnet.CPD{2} = gaussian_CPD(bnet,2,'mean',[0 10],'clamp_mean',1,...
+ 'cov',[10 10],'clamp_cov',1);
+
+% Modeled hidden input
+bnet.CPD{3} = gaussian_CPD(bnet,3,'mean',[0, 10],'clamp_mean',1,...
+ 'cov',[0.1 0.1],'clamp_cov',1);
+
+% Modeled output in the first timeslice, thus there are no parents
+% Usually the output nodes get a low covariance. But in the first
+% time-slice a prediction of the output is not possible due to
+% missing information
+bnet.CPD{4} = gaussian_CPD(bnet,4,'mean',0,'clamp_mean',1,...
+ 'cov',cov_high,'clamp_cov',1);
+
+%Disturbance
+bnet.CPD{5} = gaussian_CPD(bnet,5,'mean',0,...
+ 'cov',[4],...
+ 'clamp_mean',1,...
+ 'clamp_cov',1);
+
+%Observed output.
+bnet.CPD{6} = gaussian_CPD(bnet,6,'mean',0,...
+ 'clamp_mean',1,...
+ 'cov',cov_low,'clamp_cov',1,...
+ 'weights',[1 1],'clamp_weights',1);
+
+% Discrete node at second time slice
+bnet.CPD{7} = tabular_CPD(bnet,7,'CPT',[0.6 0.4 0.4 0.6],'adjustable',0);
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Node for the model output %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+bnet.CPD{8} = gaussian_CPD(bnet,10,'mean',0,...
+ 'cov',cov_high,...
+ 'clamp_mean',1,...
+ 'clamp_cov',1);
+% 'weights',[0.0791 0.9578]);
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Node for the disturbance %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+bnet.CPD{9} = gaussian_CPD(bnet,11,'mean',0,'clamp_mean',1,...
+ 'cov',[4],'clamp_cov',1,...
+ 'weights',[1],'clamp_weights',1);
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Node for the model output %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+bnet.CPD{10} = gaussian_CPD(bnet,16,'mean',0,'clamp_mean',1,...
+ 'cov',cov_low,'clamp_cov',1);
+% 'weights',[0.0188 -0.0067 0.0791 0.9578]);
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Node for the disturbance %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+bnet.CPD{11} = gaussian_CPD(bnet,17,'mean',0,'clamp_mean',1,...
+ 'cov',[0.2],'clamp_cov',1,...
+ 'weights',[1],'clamp_weights',1);
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/jtree_clq_test.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/jtree_clq_test.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,150 @@
+% Construct various DBNs and examine their clique structure.
+% This was used to generate various figures in chap 3-4 of my thesis.
+
+% Examine the cliques in the unrolled mildew net
+
+%dbn = mk_mildew_dbn;
+dbn = mk_chmm(4);
+ss = dbn.nnodes_per_slice;
+T = 7;
+N = ss*T;
+bnet = dbn_to_bnet(dbn, T);
+
+constrained = 0;
+if constrained
+ stages = num2cell(unroll_set(1:ss, ss, T), 1);
+else
+ stages = { 1:N; };
+end
+clusters = {};
+%[jtree, root, cliques, B, w, elim_order, moral_edges, fill_in_edges] = ...
+% dag_to_jtree(bnet, bnet.observed, stages, clusters);
+[jtree, root, cliques] = graph_to_jtree(moralize(bnet.dag), ones(1,N), stages, clusters);
+
+flip=1;
+clf;[dummyx, dummyy, h] = draw_dbn(dbn.intra, dbn.inter, flip, T, -1);
+dir = '/home/eecs/murphyk/WP/Thesis/Figures/Inf/MildewUnrolled';
+mk_ps_from_clqs(dbn, T, cliques, [])
+%mk_collage_from_clqs(dir, cliques)
+
+
+% Examine the cliques in the cascade DBN
+
+% A-A
+% \
+% B B
+% \
+% C C
+% \
+% D D
+ss = 4;
+intra = zeros(ss);
+inter = zeros(ss);
+inter(1, [1 2])=1;
+for i=2:ss-1
+ inter(i,i+1)=1;
+end
+
+
+% 2 coupled HMMs 1,3 and 2,4
+ss = 4;
+intra = zeros(ss);
+inter = zeros(ss); % no persistent edges
+%inter = diag(ones(ss,1)); % persistence edges
+inter(1,3)=1; inter(3,1)=1;
+inter(2,4)=1; inter(4,2)=1;
+
+%bnet = mk_fhmm(3);
+bnet = mk_chmm(4);
+intra = bnet.intra;
+inter = bnet.inter;
+
+clqs = compute_minimal_interface(intra, inter);
+celldisp(clqs)
+
+
+
+
+% A A
+% \
+% B B
+% \
+% C C
+% \
+% D-D
+ss = 4;
+intra = zeros(ss);
+inter = zeros(ss);
+for i=1:ss-1
+ inter(i,i+1)=1;
+end
+inter(4,4)=1;
+
+
+
+ns = 2*ones(1,ss);
+dbn = mk_dbn(intra, inter, ns);
+for i=2*ss
+ dbn.CPD{i} = tabular_CPD(bnet, i);
+end
+
+T = 4;
+N = ss*T;
+bnet = dbn_to_bnet(dbn, T);
+
+constrained = 1;
+if constrained
+ % elim first 3 slices first in any order
+ stages = {1:12, 13:16};
+ %stages = num2cell(unroll_set(1:ss, ss, T), 1);
+else
+ stages = { 1:N; };
+end
+clusters = {};
+%[jtree, root, cliques, B, w, elim_order, moral_edges, fill_in_edges] = ...
+% dag_to_jtree(bnet, bnet.observed, stages, clusters);
+[jtree, root, cliques] = graph_to_jtree(moralize(bnet.dag), ones(1,N), stages, clusters);
+
+
+
+
+
+% Examine the cliques in the 1.5 slice DBN
+
+%dbn = mk_mildew_dbn;
+dbn = mk_water_dbn;
+%dbn = mk_bat_dbn;
+ss = dbn.nnodes_per_slice;
+int = compute_fwd_interface(dbn);
+bnet15 = mk_slice_and_half_dbn(dbn, int);
+N = length(bnet15.dag);
+stages = {1:N};
+
+% bat
+%cl1 = [16 17 19 7 14];
+%cl2 = [27 25 21 23 20];
+%clusters = {cl1, cl2, cl1+ss, cl2+ss};
+
+% water
+%cl1 = 1:2; cl2 = 3:6; cl3 = 7:8;
+%clusters = {cl1, cl2, cl3, cl1+ss, cl2+ss, cl3+ss};
+
+%clusters = {};
+clusters = {int, int+ss};
+%[jtree, root, cliques, B, w, elim_order, moral_edges, fill_in_edges] = ...
+% dag_to_jtree(bnet15, bnet.observed, stages, clusters);
+[jtree, root, cliques] = graph_to_jtree(moralize(bnet15.dag), ones(1,N), stages, clusters);
+
+clq_len = [];
+for c=1:length(cliques)
+ clq_len(c) = length(cliques{c});
+end
+hist(clq_len, 1:max(clq_len));
+h=hist(clq_len, 1:max(clq_len));
+axis([1 max(clq_len)+1 0 max(h)+1])
+xlabel('clique size','fontsize',16)
+ylabel('number','fontsize',16)
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/jtree_clq_test2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/jtree_clq_test2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+
+%bnet = mk_uffe_dbn;
+bnet = mk_mildew_dbn;
+ss = length(bnet.intra);
+
+% construct jtree from 1.5 slice DBN
+
+int = compute_fwd_interface(bnet.intra, bnet.inter);
+bnet15 = mk_slice_and_half_dbn(bnet, int);
+
+% use unconstrained elimination,
+% but force there to be a clique containing both interfaces
+clusters = {int, int+ss};
+jtree_engine = jtree_inf_engine(bnet15, 'clusters', clusters, 'root', int+ss);
+S=struct(jtree_engine)
+in_clq = clq_containing_nodes(jtree_engine, int);
+out_clq = clq_containing_nodes(jtree_engine, int+ss)
+
+
+% Also make a jtree from slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice);
+jtree_engine1 = jtree_inf_engine(bnet1, 'clusters', {int}, 'root', int);
+S1=struct(jtree_engine1)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/kalman1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/kalman1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+% Make a linear dynamical system
+% X1 -> X2
+% | |
+% v v
+% Y1 Y2
+
+intra = zeros(2);
+intra(1,2) = 1;
+inter = zeros(2);
+inter(1,1) = 1;
+n = 2;
+
+X = 2; % size of hidden state
+Y = 2; % size of observable state
+ns = [X Y];
+bnet = mk_dbn(intra, inter, ns, 'discrete', [], 'observed', 2);
+
+x0 = rand(X,1);
+V0 = eye(X);
+C0 = rand(Y,X);
+R0 = eye(Y);
+A0 = rand(X,X);
+Q0 = eye(X);
+
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', x0, 'cov', V0, 'cov_prior_weight', 0);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(Y,1), 'cov', R0, 'weights', C0, ...
+ 'clamp_mean', 1, 'cov_prior_weight', 0);
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', zeros(X,1), 'cov', Q0, 'weights', A0, ...
+ 'clamp_mean', 1, 'cov_prior_weight', 0);
+
+
+T = 5; % fixed length sequences
+
+clear engine;
+engine{1} = kalman_inf_engine(bnet);
+engine{2} = jtree_unrolled_dbn_inf_engine(bnet, T);
+engine{3} = jtree_dbn_inf_engine(bnet);
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+N = length(engine);
+
+
+inf_time = cmp_inference_dbn(bnet, engine, T);
+
+ncases = 2;
+max_iter = 2;
+[learning_time, CPD, LL, cases] = cmp_learning_dbn(bnet, engine, T, 'ncases', ncases, 'max_iter', max_iter);
+
+
+% Compare to KF toolbox
+
+data = zeros(Y, T, ncases);
+for i=1:ncases
+ data(:,:,i) = cell2num(cases{i}(onodes, :));
+end
+[A2, C2, Q2, R2, x2, V2, LL2trace] = learn_kalman(data, A0, C0, Q0, R0, x0, V0, max_iter);
+
+
+e = 1;
+assert(approxeq(x2, CPD{e,1}.mean))
+assert(approxeq(V2, CPD{e,1}.cov))
+assert(approxeq(C2, CPD{e,2}.weights))
+assert(approxeq(R2, CPD{e,2}.cov));
+assert(approxeq(A2, CPD{e,3}.weights))
+assert(approxeq(Q2, CPD{e,3}.cov));
+assert(approxeq(LL2trace, LL{1}))
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/kjaerulff1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/kjaerulff1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+% Compare the speeds of various inference engines on the DBN in Kjaerulff
+% "dHugin: A computational system for dynamic time-sliced {B}ayesian networks",
+% Intl. J. Forecasting 11:89-111, 1995.
+%
+% The intra structure is (all arcs point downwards)
+%
+% 1 -> 2
+% \ /
+% 3
+% |
+% 4
+% / \
+% 5 6
+% \ /
+% 7
+% |
+% 8
+%
+% The inter structure is 1->1, 4->4, 8->8
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+ss = 8;
+intra = zeros(ss);
+intra(1,[2 3])=1;
+intra(2,3)=1;
+intra(3,4)=1;
+intra(4,[5 6])=1;
+intra([5 6], 7)=1;
+intra(7,8)=1;
+
+inter = zeros(ss);
+inter(1,1)=1;
+inter(4,4)=1;
+inter(8,8)=1;
+
+ns = 2*ones(1,ss);
+onodes = 2;
+bnet = mk_dbn(intra, inter, ns, 'observed', onodes, 'eclass2', (1:ss)+ss);
+for i=1:2*ss
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+T = 4;
+
+engine = {};
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+%engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet)); % observed nodes have children
+
+inf_time = cmp_inference_dbn(bnet, engine, T)
+learning_time = cmp_learning_dbn(bnet, engine, T)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/loopy_dbn1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/loopy_dbn1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+N = 1; % single chain = HMM - should give exact answers
+Q = 2;
+rand('state', 0);
+randn('state', 0);
+discrete = 1;
+if discrete
+ Y = 2; % size of output alphabet
+else
+ Y = 1;
+end
+coupled = 1;
+bnet = mk_chmm(N, Q, Y, discrete, coupled);
+ss = N*2;
+
+T = 3;
+
+engine = {};
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+engine{end+1} = pearl_unrolled_dbn_inf_engine(bnet, 'protocol', 'tree');
+
+inf_time = cmp_inference_dbn(bnet, engine, T)
+learning_time = cmp_learning_dbn(bnet, engine, T)
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mhmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mhmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,85 @@
+% Make an HMM with mixture of Gaussian observations
+% Q1 ---> Q2
+% / | / |
+% M1 | M2 |
+% \ v \ v
+% Y1 Y2
+% where Pr(m=j|q=i) is a multinomial and Pr(y|m,q) is a Gaussian
+
+%seed = 3;
+%rand('state', seed);
+%randn('state', seed);
+
+intra = zeros(3);
+intra(1,[2 3]) = 1;
+intra(2,3) = 1;
+inter = zeros(3);
+inter(1,1) = 1;
+n = 3;
+
+Q = 2; % num hidden states
+O = 2; % size of observed vector
+M = 2; % num mixture components per state
+
+ns = [Q M O];
+dnodes = [1 2];
+onodes = [3];
+eclass1 = [1 2 3];
+eclass2 = [4 2 3];
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+
+prior0 = normalise(rand(Q,1));
+transmat0 = mk_stochastic(rand(Q,Q));
+mixmat0 = mk_stochastic(rand(Q,M));
+mu0 = rand(O,Q,M);
+Sigma0 = repmat(eye(O), [1 1 Q M]);
+bnet.CPD{1} = tabular_CPD(bnet, 1, prior0);
+bnet.CPD{2} = tabular_CPD(bnet, 2, mixmat0);
+%% we set the cov prior to 0 to give same results as HMM toolbox
+%bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', mu0, 'cov', Sigma0, 'cov_prior_weight', 0);
+% new version of HMM toolbox uses the same default prior on Gaussians as BNT
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', mu0, 'cov', Sigma0);
+bnet.CPD{4} = tabular_CPD(bnet, 4, transmat0);
+
+
+
+T = 5; % fixed length sequences
+
+engine = {};
+engine{end+1} = hmm_inf_engine(bnet);
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+if 0
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+%engine{end+1} = frontier_inf_engine(bnet);
+engine{end+1} = bk_inf_engine(bnet, 'clusters', 'exact');
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+end
+
+inf_time = cmp_inference_dbn(bnet, engine, T);
+
+ncases = 2;
+max_iter = 2;
+[learning_time, CPD, LL, cases] = cmp_learning_dbn(bnet, engine, T, 'ncases', ncases, 'max_iter', max_iter);
+
+% Compare to HMM toolbox
+
+data = zeros(O, T, ncases);
+for i=1:ncases
+ data(:,:,i) = reshape(cell2num(cases{i}(onodes,:)), [O T]);
+end
+tic;
+[LL2, prior2, transmat2, mu2, Sigma2, mixmat2] = ...
+ mhmm_em(data, prior0, transmat0, mu0, Sigma0, mixmat0, 'max_iter', max_iter);
+t=toc;
+disp(['HMM toolbox took ' num2str(t) ' seconds '])
+
+for e = 1:length(engine)
+ assert(approxeq(prior2, CPD{e,1}.CPT))
+ assert(approxeq(mixmat2, CPD{e,2}.CPT))
+ assert(approxeq(mu2, CPD{e,3}.mean))
+ assert(approxeq(Sigma2, CPD{e,3}.cov))
+ assert(approxeq(transmat2, CPD{e,4}.CPT))
+ assert(approxeq(LL2, LL{e}))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mildew1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mildew1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+bnet = mk_mildew_dbn;
+
+T = 4;
+engine = {};
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+%engine{end+1} = hmm_inf_engine(bnet); % 8 is observed but has kids
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+%engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+
+inf_time = cmp_inference_dbn(bnet, engine, T, 'check_ll', 0)
+%learning_time = cmp_learning_dbn(bnet, engine, T)
+
+S = struct(engine{1});
+S1 = struct(S.unrolled_engine);
+G = S1.jtree;
+%graph_to_dot(G, 'directed', 0, 'leftright', 1, ...
+% 'filename', '/home/eecs/murphyk/WP/Thesis/Figures/Inf/Mildew/jtree.dot')
+%!dot -Tps jtree.dot -o jtree.ps
+% The resulting ps file cannot be converted using ps2pdf.
+
+N = length(G);
+for i=1:N
+ for j=1:N
+ if G(i,j)
+ G(j,i)=1;
+ end
+ end
+end
+draw_graph(G)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_bat_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_bat_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,63 @@
+function [bnet, names] = mk_bat_dbn()
+% MK_BAT_DBN Make the BAT DBN
+% [bnet, names] = mk_bat_dbn()
+% See
+% - Forbes, Huang, Kanazawa and Russell, "The BATmobile: Towards a Bayesian Automated Taxi", IJCAI 95
+% - Boyen and Koller, "Tractable Inference for Complex Stochastic Processes", UAI98.
+
+names = {'LeftClr', 'RightClr', 'LatAct', 'Xdot', 'InLane', 'FwdAct', ...
+ 'Ydot', 'Stopped', 'EngStatus', 'FBStatus', ...
+ 'LeftClrSens', 'RightClrSens', 'TurnSignalSens', 'XdotSens', 'YdotSens', ...
+ 'FYdotDiffSens', 'FclrSens', 'BXdotSens', 'BclrSens', 'BYdotDiffSens', ...
+ 'SensorValid', 'FYdotDiff', 'FcloseSlow', 'Fclr', 'BXdot', 'BcloseFast', 'Bclr', 'BYdotDiff'};
+ss = length(names);
+
+intrac = {...
+ 'LeftClr', 'LeftClrSens';
+ 'RightClr', 'RightClrSens';
+ 'LatAct', 'TurnSignalSens'; 'LatAct', 'Xdot';
+ 'Xdot', 'XdotSens';
+ 'FwdAct', 'Ydot';
+ 'Ydot', 'YdotSens'; 'Ydot', 'Stopped';
+ 'EngStatus', 'Ydot'; 'EngStatus', 'FYdotDiff'; 'EngStatus', 'Fclr'; 'EngStatus', 'BXdot';
+ 'SensorValid', 'XdotSens'; 'SensorValid', 'YdotSens';
+ 'FYdotDiff', 'FYdotDiffSens'; 'FYdotDiff', 'FcloseSlow';
+ 'FcloseSlow', 'FBStatus';
+ 'Fclr', 'FclrSens'; 'Fclr', 'FcloseSlow';
+ 'BXdot', 'BXdotSens';
+ 'Bclr', 'BclrSens'; 'Bclr', 'BXdot'; 'Bclr', 'BcloseFast';
+ 'BcloseFast', 'FBStatus';
+ 'BYdotDiff', 'BYdotDiffSens'; 'BYdotDiff', 'BcloseFast'};
+[intra, names] = mk_adj_mat(intrac, names, 1);
+
+
+interc = {...
+ 'LeftClr', 'LeftClr'; 'LeftClr', 'LatAct';
+ 'RightClr', 'RightClr'; 'RightClr', 'LatAct';
+ 'LatAct', 'LatAct'; 'LatAct', 'FwdAct';
+ 'Xdot', 'Xdot'; 'Xdot', 'InLane';
+ 'InLane', 'InLane'; 'InLane', 'LatAct';
+ 'FwdAct', 'FwdAct';
+ 'Ydot', 'Ydot';
+ 'Stopped', 'Stopped';
+ 'EngStatus', 'EngStatus';
+ 'FBStatus', 'FwdAct'; 'FBStatus', 'LatAct'};
+inter = mk_adj_mat(interc, names, 0);
+
+obs = {'LeftClrSens', 'RightClrSens', 'TurnSignalSens', 'XdotSens', 'YdotSens', 'FYdotDiffSens', ...
+ 'FclrSens', 'BXdotSens', 'BclrSens', 'BYdotDiffSens'};
+
+for i=1:length(obs)
+ onodes(i) = strmatch(obs{i}, names); %stringmatch(obs{i}, names);
+end
+onodes = sort(onodes);
+
+dnodes = 1:ss;
+ns = 2*ones(1,ss); % binary nodes
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'observed', onodes, 'eclass2', (1:ss)+ss);
+
+% make rnd params
+for i=1:2*ss
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_chmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_chmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,103 @@
+function bnet = mk_chmm(N, Q, Y, discrete_obs, coupled, CPD)
+% MK_CHMM Make a coupled Hidden Markov Model
+%
+% There are N hidden nodes, each connected to itself and its two nearest neighbors in the next
+% slice (apart from the edges, where there is 1 nearest neighbor).
+%
+% Example: If N = 3, the hidden backbone is as follows, where all arrows point to the right
+%
+% X1--X1
+% \/
+% /\
+% X2--X2
+% \/
+% /\
+% X3--X3
+%
+% Each hidden node has a "private" observed child (not shown).
+%
+% BNET = MK_CHMM(N, Q, Y)
+% Each hidden node is discrete and has Q values.
+% Each observed node is a Gaussian vector of length Y.
+%
+% BNET = MK_CHMM(N, Q, Y, DISCRETE_OBS)
+% If discrete_obs = 1, the observations are discrete (values in {1, .., Y}).
+%
+% BNET = MK_CHMM(N, Q, Y, DISCRETE_OBS, COUPLED)
+% If coupled = 0, the chains are not coupled, i.e., we make N parallel HMMs.
+%
+% BNET = MK_CHMM(N, Q, Y, DISCRETE_OBS, COUPLED, CPDs)
+% means use the specified CPD structures instead of creating random params.
+% CPD{i}.CPT, i=1:N specifies the prior
+% CPD{i}.CPT, i=2N+1:3N specifies the transition model
+% CPD{i}.mean, CPD{i}.cov, i=N+1:2N specifies the observation model if Gaussian
+% CPD{i}.CPT, i=N+1:2N if discrete
+
+
+if nargin < 2, Q = 2; end
+if nargin < 3, Y = 1; end
+if nargin < 4, discrete_obs = 0; end
+if nargin < 5, coupled = 1; end
+if nargin < 6, rnd = 1; else rnd = 0; end
+
+ss = N*2;
+hnodes = 1:N;
+onodes = (1:N)+N;
+
+intra = zeros(ss);
+for i=1:N
+ intra(hnodes(i), onodes(i))=1;
+end
+
+inter = zeros(ss);
+if coupled
+ for i=1:N
+ inter(i, max(i-1,1):min(i+1,N))=1;
+ end
+else
+ inter(1:N, 1:N) = eye(N);
+end
+
+ns = [Q*ones(1,N) Y*ones(1,N)];
+
+eclass1 = [hnodes onodes];
+eclass2 = [hnodes+ss onodes];
+if discrete_obs
+ dnodes = 1:ss;
+else
+ dnodes = hnodes;
+end
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+
+if rnd
+ for i=hnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ for i=onodes(:)'
+ if discrete_obs
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ else
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+ end
+ end
+ for i=hnodes(:)'+ss
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+else
+ for i=hnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i, CPD{i}.CPT);
+ end
+ for i=onodes(:)'
+ if discrete_obs
+ bnet.CPD{i} = tabular_CPD(bnet, i, CPD{i}.CPT);
+ else
+ bnet.CPD{i} = gaussian_CPD(bnet, i, CPD{i}.mean, CPD{i}.cov);
+ end
+ end
+ for i=hnodes(:)'+ss
+ bnet.CPD{i} = tabular_CPD(bnet, i, CPD{i}.CPT);
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_collage_from_clqs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_collage_from_clqs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function mk_collage_from_clqs(dir, cliques)
+
+% For use with mk_ps_from_clqs.
+% This generates a latex file that glues all the .ps files
+% into one big figure.
+
+cd(dir)
+C = length(cliques);
+
+ncols = 4;
+width = 1.5;
+fid = fopen('collage.tex', 'w');
+fprintf(fid, '\\documentclass{article}\n');
+fprintf(fid, '\\usepackage{psfig}\n');
+fprintf(fid, '\\begin{document}\n');
+fprintf(fid, '\\centerline{\n');
+fprintf(fid, '\\begin{tabular}{');
+for col=1:ncols, fprintf(fid, 'c'); end
+fprintf(fid, '}\n');
+c = 1;
+for row = 1:floor(C/ncols)
+ for col=1:ncols-1
+ fname = sprintf('%s/clq%d.ps', dir, c);
+ fprintf(fid, '\\psfig{file=%s,width=%3fin} & \n', fname, width);
+ c = c + 1;
+ end
+ fname = sprintf('%s/clq%d.ps', dir, c);
+ fprintf(fid, '\\psfig{file=%s,width=%3fin} \\\\ \n', fname, width);
+ c = c + 1;
+end
+% last row
+while (c <= C)
+ fname = sprintf('%s/clq%d.ps', dir, c);
+ fprintf(fid, '\\psfig{file=%s,width=%3fin} & \n', fname, width);
+ c = c + 1;
+end
+fprintf(fid, '\\end{tabular}\n');
+fprintf(fid, '}\n');
+fprintf(fid, '\\end{document}');
+fclose(fid);
+
+!latex collage.tex &
+!dvips -o collage.ps collage.dvi &
+!ghostview collage.ps &
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_fhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_fhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function bnet = mk_fhmm(N, Q, Y, discrete_obs)
+% MK_FHMM Make a factorial Hidden Markov Model
+%
+% There are N independent parallel hidden chains, each connected to the output
+%
+% e.g., N = 2 (vertical/diagonal edges point down)
+%
+% A1--->A2
+% | B1--|->B2
+% | / |/
+% Y1 Y2
+%
+% [bnet, onode] = mk_chmm(n, q, y, discrete_obs)
+%
+% Each hidden node is discrete and has Q values.
+% If discrete_obs = 1, each observed node is discrete and has values 1..Y.
+% If discrete_obs = 0, each observed node is a Gaussian vector of length Y.
+
+if nargin < 2, Q = 2; end
+if nargin < 3, Y = 2; end
+if nargin < 4, discrete_obs = 1; end
+
+ss = N+1;
+hnodes = 1:N;
+onode = N+1;
+
+intra = zeros(ss);
+intra(hnodes, onode) = 1;
+
+inter = eye(ss);
+inter(onode,onode) = 0;
+
+ns = [Q*ones(1,N) Y];
+
+eclass1 = [hnodes onode];
+eclass2 = [hnodes+ss onode];
+if discrete_obs
+ dnodes = 1:ss;
+else
+ dnodes = hnodes;
+end
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onode);
+
+for i=hnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+i = onode;
+if discrete_obs
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+else
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+end
+for i=hnodes(:)'+ss
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_mildew_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_mildew_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function bnet = mk_mildew_dbn()
+
+% DBN for forecasting the gross yield of wheat based on climatic data,
+% observations of leaf area index (LAI) and extension of mildew,
+% and knowledge of amount of fungicides used and time of usage.
+% From Kjaerulff '95.
+
+Fungi=1; Mildew=2; LAI=3; Precip=4; Temp=5; Micro=6; Solar=7; Photo=8; Dry=9;
+n = 9;
+intra = zeros(n,n);
+intra(Mildew, LAI)=1;
+intra(LAI,[Micro Photo])=1;
+intra(Precip,Micro)=1;
+intra(Temp,[Micro Photo])=1;
+intra(Solar,Photo)=1;
+intra(Photo,Dry)=1;
+
+inter = zeros(n,n);
+inter(Fungi,Mildew)=1;
+inter(Mildew,Mildew)=1;
+inter(LAI,LAI)=1;
+inter(Micro,Mildew)=1;
+inter(Dry,Dry)=1;
+
+ns = 2*ones(1,n);
+bnet = mk_dbn(intra, inter, ns, 'observed', [Photo]);
+
+for e=1:max(bnet.equiv_class(:))
+ i = bnet.rep_of_eclass(e);
+ bnet.CPD{e} = tabular_CPD(bnet,i);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_orig_bat_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_orig_bat_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,198 @@
+function [bnet, names] = mk_orig_bat_dbn()
+% MK_BAT_DBN Make the BAT DBN
+% [bnet, names] = mk_bat_dbn()
+% See
+% - Forbes, Huang, Kanazawa and Russell, "The BATmobile: Towards a Bayesian Automated Taxi", IJCAI 95
+% - Boyen and Koller, "Tractable Inference for Complex Stochastic Processes", UAI98.
+
+names = {'LeftClr', 'RightClr', 'LatAct', 'Xdot', 'InLane', 'FwdAct', ...
+ 'Ydot', 'Stopped', 'EngStatus', 'FBStatus', ...
+ 'LeftClrSens', 'RightClrSens', 'TurnSignalSens', 'XdotSens', 'YdotSens', ...
+ 'FYdotDiffSens', 'FclrSens', 'BXdotSens', 'BclrSens', 'BYdotDiffSens', ...
+ 'SensorValid', 'FYdotDiff', 'FcloseSlow', 'Fclr', 'BXdot', 'BcloseFast', 'Bclr', 'BYdotDiff'};
+ss = length(names);
+
+intrac = {...
+ 'LeftClr', 'LeftClrSens';
+ 'RightClr', 'RightClrSens';
+ 'LatAct', 'TurnSignalSens'; 'LatAct', 'Xdot';
+ 'Xdot', 'XdotSens';
+ 'FwdAct', 'Ydot';
+ 'Ydot', 'YdotSens'; 'Ydot', 'Stopped';
+ 'EngStatus', 'Ydot'; 'EngStatus', 'FYdotDiff'; 'EngStatus', 'Fclr'; 'EngStatus', 'BXdot';
+ 'SensorValid', 'XdotSens'; 'SensorValid', 'YdotSens';
+ 'FYdotDiff', 'FYdotDiffSens'; 'FYdotDiff', 'FcloseSlow';
+ 'FcloseSlow', 'FBStatus';
+ 'Fclr', 'FclrSens'; 'Fclr', 'FcloseSlow';
+ 'BXdot', 'BXdotSens';
+ 'Bclr', 'BclrSens'; 'Bclr', 'BXdot'; 'Bclr', 'BcloseFast';
+ 'BcloseFast', 'FBStatus';
+ 'BYdotDiff', 'BYdotDiffSens'; 'BYdotDiff', 'BcloseFast'};
+[intra, names] = mk_adj_mat(intrac, names, 1);
+
+
+interc = {...
+ 'LeftClr', 'LeftClr'; 'LeftClr', 'LatAct';
+ 'RightClr', 'RightClr'; 'RightClr', 'LatAct';
+ 'LatAct', 'LatAct'; 'LatAct', 'FwdAct';
+ 'Xdot', 'Xdot'; 'Xdot', 'InLane';
+ 'InLane', 'InLane'; 'InLane', 'LatAct';
+ 'FwdAct', 'FwdAct';
+ 'Ydot', 'Ydot';
+ 'Stopped', 'Stopped';
+ 'EngStatus', 'EngStatus';
+ 'FBStatus', 'FwdAct'; 'FBStatus', 'LatAct'};
+inter = mk_adj_mat(interc, names, 0);
+
+obs = {'LeftClrSens', 'RightClrSens', 'TurnSignalSens', 'XdotSens', 'YdotSens', 'FYdotDiffSens', ...
+ 'FclrSens', 'BXdotSens', 'BclrSens', 'BYdotDiffSens'};
+
+for i=1:length(obs)
+ onodes(i) = stringmatch(obs{i}, names);
+end
+onodes = sort(onodes);
+
+dnodes = 1:ss;
+ns = zeros(1,ss);
+
+ns(stringmatch('LeftClr', names)) = 2;
+ns(stringmatch('RightClr', names)) = 2;
+ns(stringmatch('LatAct', names)) = 3;
+ns(stringmatch('Xdot', names)) = 7;
+ns(stringmatch('InLane', names)) = 2;
+ns(stringmatch('FwdAct', names)) = 3;
+ns(stringmatch('Ydot', names)) = 11;
+ns(stringmatch('Stopped', names)) = 2;
+ns(stringmatch('EngStatus', names)) = 2;
+ns(stringmatch('FBStatus', names)) = 3;
+ns(stringmatch('LeftClrSens', names)) = 2;
+ns(stringmatch('RightClrSens', names)) = 2;
+ns(stringmatch('TurnSignalSens', names)) = 3;
+ns(stringmatch('XdotSens', names)) = 7;
+ns(stringmatch('YdotSens', names)) = 11;
+ns(stringmatch('FYdotDiffSens', names)) = 8;
+ns(stringmatch('FclrSens', names)) = 20;
+ns(stringmatch('BXdotSens', names)) = 8;
+ns(stringmatch('BclrSens', names)) = 20;
+ns(stringmatch('BYdotDiffSens', names)) = 8;
+ns(stringmatch('SensorValid', names)) = 2;
+ns(stringmatch('FYdotDiff', names)) = 4;
+ns(stringmatch('FcloseSlow', names)) = 2;
+ns(stringmatch('Fclr', names)) = 3;
+ns(stringmatch('BXdot', names)) = 8;
+ns(stringmatch('BcloseFast', names)) = 2;
+ns(stringmatch('Bclr', names)) = 3;
+ns(stringmatch('BYdotDiff', names)) = 4;
+
+%ns = 2*ones(1,ss);
+
+
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'observed', onodes, 'eclass2', (1:ss)+ss);
+
+% make unif params
+for i=1:2*ss
+ bnet.CPD{i} = tabular_CPD(bnet, i, 'CPT', 'unif');
+end
+
+i = stringmatch('LeftClr', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.99 0.01 0.01 0.99]);
+
+i = stringmatch('RightClr', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.99 0.01 0.01 0.99]);
+
+i = stringmatch('LatAct', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.166666666666667 0.975609756097561 0.111111111111111 0.961538461538462 0.00980392156862745 0.0380952380952381 0.00952380952380952 0.037037037037037 0.166666666666667 0.444444444444444 0.0048780487804878 0.0192307692307692 0.0384615384615385 0.888888888888889 0.0344827586206897 0.87719298245614 0.00819672131147541 0.032 0.008 0.03125 0.0384615384615385 0.137931034482759 0.00444444444444444 0.0175438596491228 0.166666666666667 0.975609756097561 0.111111111111111 0.961538461538462 0.166666666666667 0.8 0.04 0.454545454545455 0.166666666666667 0.444444444444444 0.0048780487804878 0.0192307692307692 0.0384615384615385 0.888888888888889 0.0344827586206897 0.87719298245614 0.0384615384615385 0.444444444444444 0.0222222222222222 0.3125 0.0384615384615385 0.137931034482759 0.00444444444444444 0.0175438596491228 0.166666666666667 0.975609756097561 0.111111111111111 0.961538461538462 0.166666666666667 0.8 0.04 0.454545454545455 0.166666666666667 0.444444444444444 0.0048780487804878 0.0192307692307692 0.0384615384615385 0.888888888888889 0.0344827586206897 0.87719298245614 0.0384615384615385 0.444444444444444 0.0222222222222222 0.3125 0.0384615384615385 0.137931034482759 0.00444444444444444 0.0175438596491228 0.666666666666667 0.0195121951219512 0.444444444444444 0.0192307692307692 0.980392156862745 0.952380952380952 0.952380952380952 0.925925925925926 0.666666666666667 0.444444444444444 0.0195121951219512 0.0192307692307692 0.923076923076923 0.106666666666667 0.827586206896552 0.105263157894737 0.983606557377049 0.96 0.96 0.9375 0.923076923076923 0.827586206896552 0.106666666666667 0.105263157894737 0.666666666666667 0.0195121951219512 0.444444444444444 0.0192307692307692 0.666666666666667 0.16 0.16 0.0909090909090909 0.666666666666667 0.444444444444444 0.0195121951219512 0.0192307692307692 0.923076923076923 0.106666666666667 0.827586206896552 0.105263157894737 0.923076923076923 0.533333333333333 0.533333333333333 0.375 0.923076923076923 
0.827586206896552 0.106666666666667 0.105263157894737 0.666666666666667 0.0195121951219512 0.444444444444444 0.0192307692307692 0.666666666666667 0.16 0.16 0.0909090909090909 0.666666666666667 0.444444444444444 0.0195121951219512 0.0192307692307692 0.923076923076923 0.106666666666667 0.827586206896552 0.105263157894737 0.923076923076923 0.533333333333333 0.533333333333333 0.375 0.923076923076923 0.827586206896552 0.106666666666667 0.105263157894737 0.166666666666667 0.0048780487804878 0.444444444444444 0.0192307692307692 0.00980392156862745 0.00952380952380952 0.0380952380952381 0.037037037037037 0.166666666666667 0.111111111111111 0.975609756097561 0.961538461538462 0.0384615384615385 0.00444444444444444 0.137931034482759 0.0175438596491228 0.00819672131147541 0.008 0.032 0.03125 0.0384615384615385 0.0344827586206897 0.888888888888889 0.87719298245614 0.166666666666667 0.0048780487804878 0.444444444444444 0.0192307692307692 0.166666666666667 0.04 0.8 0.454545454545455 0.166666666666667 0.111111111111111 0.975609756097561 0.961538461538462 0.0384615384615385 0.00444444444444444 0.137931034482759 0.0175438596491228 0.0384615384615385 0.0222222222222222 0.444444444444444 0.3125 0.0384615384615385 0.0344827586206897 0.888888888888889 0.87719298245614 0.166666666666667 0.0048780487804878 0.444444444444444 0.0192307692307692 0.166666666666667 0.04 0.8 0.454545454545455 0.166666666666667 0.111111111111111 0.975609756097561 0.961538461538462 0.0384615384615385 0.00444444444444444 0.137931034482759 0.0175438596491228 0.0384615384615385 0.0222222222222222 0.444444444444444 0.3125 0.0384615384615385 0.0344827586206897 0.888888888888889 0.87719298245614]);
+
+i = stringmatch('Xdot', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.497512437810945 0.115207373271889 0.0564971751412429 0.0290697674418605 0.075187969924812 0.0300751879699248 0.0298507462686567 0.0980392156862745 0.0392156862745098 0.043956043956044 0.0294117647058824 0.0283687943262411 0.0392156862745098 0.0392156862745098 0.0373134328358209 0.0300751879699248 0.0300751879699248 0.0232558139534884 0.0225988700564972 0.0184331797235023 0.0199004975124378 0.373134328358209 0.460829493087558 0.282485875706215 0.290697674418605 0.37593984962406 0.075187969924812 0.0373134328358209 0.490196078431373 0.0980392156862745 0.043956043956044 0.0294117647058824 0.0283687943262411 0.0392156862745098 0.0392156862745098 0.0746268656716418 0.037593984962406 0.0300751879699248 0.0232558139534884 0.0225988700564972 0.0184331797235023 0.0199004975124378 0.0497512437810945 0.345622119815668 0.564971751412429 0.581395348837209 0.37593984962406 0.37593984962406 0.0746268656716418 0.245098039215686 0.490196078431373 0.21978021978022 0.0735294117647059 0.0354609929078014 0.0490196078431373 0.0392156862745098 0.373134328358209 0.075187969924812 0.037593984962406 0.0232558139534884 0.0225988700564972 0.0184331797235023 0.0199004975124378 0.0199004975124378 0.0230414746543779 0.0282485875706215 0.0290697674418605 0.075187969924812 0.37593984962406 0.373134328358209 0.0490196078431373 0.245098039215686 0.54945054945055 0.735294117647059 0.709219858156028 0.245098039215686 0.0490196078431373 0.373134328358209 0.37593984962406 0.075187969924812 0.0290697674418605 0.0282485875706215 0.0230414746543779 0.0199004975124378 0.0199004975124378 0.0184331797235023 0.0225988700564972 0.0232558139534884 0.037593984962406 0.075187969924812 0.373134328358209 0.0392156862745098 0.0490196078431373 0.0549450549450549 0.0735294117647059 0.141843971631206 0.490196078431373 0.245098039215686 0.0746268656716418 0.37593984962406 0.37593984962406 0.581395348837209 0.564971751412429 0.345622119815668 0.0497512437810945 0.0199004975124378 
0.0184331797235023 0.0225988700564972 0.0232558139534884 0.0300751879699248 0.037593984962406 0.0746268656716418 0.0392156862745098 0.0392156862745098 0.043956043956044 0.0294117647058824 0.0283687943262411 0.0980392156862745 0.490196078431373 0.0373134328358209 0.075187969924812 0.37593984962406 0.290697674418605 0.282485875706215 0.460829493087558 0.373134328358209 0.0199004975124378 0.0184331797235023 0.0225988700564972 0.0232558139534884 0.0300751879699248 0.0300751879699248 0.0373134328358209 0.0392156862745098 0.0392156862745098 0.043956043956044 0.0294117647058824 0.0283687943262411 0.0392156862745098 0.0980392156862745 0.0298507462686567 0.0300751879699248 0.075187969924812 0.0290697674418605 0.0564971751412429 0.115207373271889 0.497512437810945]);
+
+i = stringmatch('InLane', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.5 0.3 0.5 0.3 0.5 0.3 0.9 0.01 0.5 0.3 0.5 0.3 0.5 0.3 0.5 0.7 0.5 0.7 0.5 0.7 0.1 0.99 0.5 0.7 0.5 0.7 0.5 0.7]);
+
+i = stringmatch('FwdAct', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.4 0.4 0.4 0.949050949050949 0.949050949050949 0.949050949050949 0.4 0.4 0.4 0.6 0.1 0.1 0.949050949050949 0.949050949050949 0.949050949050949 0.05 0.05 0.05 0.4 0.4 0.4 0.949050949050949 0.949050949050949 0.949050949050949 0.4 0.4 0.4 0.4 0.4 0.4 0.04995004995005 0.04995004995005 0.04995004995005 0.4 0.4 0.4 0.3 0.8 0.3 0.04995004995005 0.04995004995005 0.04995004995005 0.7 0.7 0.7 0.4 0.4 0.4 0.04995004995005 0.04995004995005 0.04995004995005 0.4 0.4 0.4 0.2 0.2 0.2 0.000999000999000999 0.000999000999000999 0.000999000999000999 0.2 0.2 0.2 0.1 0.1 0.6 0.000999000999000999 0.000999000999000999 0.000999000999000999 0.25 0.25 0.25 0.2 0.2 0.2 0.000999000999000999 0.000999000999000999 0.000999000999000999 0.2 0.2 0.2]);
+
+i = stringmatch('Ydot', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.999880014398272 0.72463768115942 0.595238095238095 0.230414746543779 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.595238095238095 0.230414746543779 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.230414746543779 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 9.99880014398272e-005 
0.144927536231884 0.297619047619048 0.460829493087558 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.297619047619048 0.460829493087558 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.460829493087558 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 0.0144927536231884 0.0595238095238095 0.230414746543779 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.0595238095238095 0.230414746543779 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.230414746543779 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 
9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 9.99880014398272e-006 0.0144927536231884 0.00595238095238095 0.0460829493087558 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0460829493087558 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0460829493087558 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.00460829493087558 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.00460829493087558 0.0442477876106195 0.221238938053097 0.442477876106195 
0.221238938053097 0.0442477876106195 0.00442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0442477876106195 0.00460829493087558 0.00595238095238095 0.0144927536231884 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 
0.0460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.221238938053097 0.0460829493087558 0.00595238095238095 0.0144927536231884 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.230414746543779 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.230414746543779 0.0595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.442477876106195 0.230414746543779 0.0595238095238095 0.0144927536231884 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.460829493087558 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.460829493087558 0.297619047619048 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.221238938053097 0.460829493087558 0.297619047619048 0.144927536231884 0.0 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0144927536231884 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.230414746543779 0.00595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.230414746543779 0.595238095238095 0.00460829493087558 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.00442477876106195 0.0442477876106195 0.230414746543779 0.595238095238095 0.72463768115942]);
+
+i = stringmatch('Stopped', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.0 0.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0]);
+
+i = stringmatch('EngStatus', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [0.9 1.0e-006 0.1 0.999999]);
+
+i = stringmatch('FBStatus', names)+ss;
+bnet.CPD{i} = tabular_CPD(bnet, i, [1.0 0.0 0.0 0.0 0.0 1.0 0.0 1.0 0.0 0.0 1.0 0.0]);
+
+i = stringmatch('SensorValid', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [1.0e-004 0.9999]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [1.0e-004 0.9999]);
+
+i = stringmatch('FYdotDiff', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.908182726364545 0.238095238095238 0.000908182726364545 0.476190476190476 9.08182726364545e-005 0.238095238095238 0.0908182726364545 0.0476190476190476]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.908182726364545 0.238095238095238 0.000908182726364545 0.476190476190476 9.08182726364545e-005 0.238095238095238 0.0908182726364545 0.0476190476190476]);
+
+i = stringmatch('FcloseSlow', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [1.0 1.0 1.0 1.0 1.0 1.0 0.0 1.0 1.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [1.0 1.0 1.0 1.0 1.0 1.0 0.0 1.0 1.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0]);
+
+i = stringmatch('Fclr', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.000499750124937531 0.142857142857143 0.499750124937531 0.285714285714286 0.499750124937531 0.571428571428571]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.000499750124937531 0.142857142857143 0.499750124937531 0.285714285714286 0.499750124937531 0.571428571428571]);
+
+i = stringmatch('BXdot', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.00980392156862745 0.00980392156862745 0.00980392156862745 0.00980392156862745 0.0454545454545455 0.00980392156862745 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.136363636363636 0.0490196078431373 0.196078431372549 0.196078431372549 0.196078431372549 0.196078431372549 0.136363636363636 0.196078431372549 0.392156862745098 0.392156862745098 0.392156862745098 0.392156862745098 0.181818181818182 0.392156862745098 0.196078431372549 0.196078431372549 0.196078431372549 0.196078431372549 0.136363636363636 0.196078431372549 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.136363636363636 0.0490196078431373 0.00980392156862745 0.00980392156862745 0.00980392156862745 0.00980392156862745 0.0454545454545455 0.00980392156862745 0.0980392156862745 0.0980392156862745 0.0980392156862745 0.0980392156862745 0.181818181818182 0.0980392156862745]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.00980392156862745 0.00980392156862745 0.00980392156862745 0.00980392156862745 0.0454545454545455 0.00980392156862745 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.136363636363636 0.0490196078431373 0.196078431372549 0.196078431372549 0.196078431372549 0.196078431372549 0.136363636363636 0.196078431372549 0.392156862745098 0.392156862745098 0.392156862745098 0.392156862745098 0.181818181818182 0.392156862745098 0.196078431372549 0.196078431372549 0.196078431372549 0.196078431372549 0.136363636363636 0.196078431372549 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.0490196078431373 0.136363636363636 0.0490196078431373 0.00980392156862745 0.00980392156862745 0.00980392156862745 0.00980392156862745 0.0454545454545455 0.00980392156862745 0.0980392156862745 0.0980392156862745 0.0980392156862745 0.0980392156862745 0.181818181818182 0.0980392156862745]);
+
+i = stringmatch('BcloseFast', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0]);
+
+i = stringmatch('Bclr', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.142857142857143 0.285714285714286 0.571428571428571]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.142857142857143 0.285714285714286 0.571428571428571]);
+
+i = stringmatch('BYdotDiff', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.238095238095238 0.476190476190476 0.238095238095238 0.0476190476190476]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.238095238095238 0.476190476190476 0.238095238095238 0.0476190476190476]);
+
+i = stringmatch('LeftClrSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.909090909090909 0.0909090909090909 0.0909090909090909 0.909090909090909]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.909090909090909 0.0909090909090909 0.0909090909090909 0.909090909090909]);
+
+i = stringmatch('RightClrSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.909090909090909 0.0909090909090909 0.0909090909090909 0.909090909090909]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.909090909090909 0.0909090909090909 0.0909090909090909 0.909090909090909]);
+
+i = stringmatch('TurnSignalSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.75 0.000998003992015968 0.01 0.24 0.998003992015968 0.24 0.01 0.000998003992015968 0.75]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.75 0.000998003992015968 0.01 0.24 0.998003992015968 0.24 0.01 0.000998003992015968 0.75]);
+
+i = stringmatch('XdotSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.142857142857143 0.897666068222621 0.142857142857143 0.0824402308326463 0.142857142857143 0.00818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.000824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.0897666068222621 0.142857142857143 0.824402308326463 0.142857142857143 0.0818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.000824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.00897666068222621 0.142857142857143 0.0824402308326463 0.142857142857143 0.818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.000824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.00824402308326463 0.142857142857143 0.0818330605564648 0.142857142857143 0.818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.00824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.000824402308326463 0.142857142857143 0.00818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.818330605564648 0.142857142857143 0.0824402308326463 0.142857142857143 0.00897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.000824402308326463 0.142857142857143 0.000818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.824402308326463 0.142857142857143 0.0897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.000824402308326463 0.142857142857143 0.000818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.0824402308326463 0.142857142857143 0.897666068222621]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.142857142857143 0.897666068222621 0.142857142857143 0.0824402308326463 0.142857142857143 0.00818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.000824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.0897666068222621 0.142857142857143 0.824402308326463 0.142857142857143 0.0818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.000824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.00897666068222621 0.142857142857143 0.0824402308326463 0.142857142857143 0.818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.000824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.00824402308326463 0.142857142857143 0.0818330605564648 0.142857142857143 0.818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.00824402308326463 0.142857142857143 0.000897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.000824402308326463 0.142857142857143 0.00818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.818330605564648 0.142857142857143 0.0824402308326463 0.142857142857143 0.00897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.000824402308326463 0.142857142857143 0.000818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.0818330605564648 0.142857142857143 0.824402308326463 0.142857142857143 0.0897666068222621 0.142857142857143 0.000897666068222621 0.142857142857143 0.000824402308326463 0.142857142857143 0.000818330605564648 0.142857142857143 0.000818330605564648 0.142857142857143 0.00818330605564648 0.142857142857143 0.0824402308326463 0.142857142857143 0.897666068222621]);
+
+i = stringmatch('YdotSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.0909090909090909 0.894454382826476 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.0894454382826476 0.0909090909090909 0.821692686935086 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.00894454382826476 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.00821692686935086 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 
0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.00894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 
0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.821692686935086 0.0909090909090909 0.0894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.894454382826476]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.0909090909090909 0.894454382826476 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.0894454382826476 0.0909090909090909 0.821692686935086 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.00894454382826476 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.00821692686935086 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 
0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.00821692686935086 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.815660685154976 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.00894454382826476 0.0909090909090909 0.000894454382826476 
0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0815660685154976 0.0909090909090909 0.821692686935086 0.0909090909090909 0.0894454382826476 0.0909090909090909 0.000894454382826476 0.0909090909090909 0.000821692686935086 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.000815660685154976 0.0909090909090909 0.00815660685154975 0.0909090909090909 0.0821692686935086 0.0909090909090909 0.894454382826476]);
+
+i = stringmatch('FYdotDiffSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.53191206429753 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.265956032148765 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.132978016074383 0.0806445109313634 0.0053191206429753 9.9930048965724e-005 0.053191206429753 0.806445109313635 0.053191206429753 9.9930048965724e-005 0.0053191206429753 0.0806445109313634 0.132978016074383 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.265956032148765 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.53191206429753 9.9930048965724e-005 5.3191206429753e-006 8.06445109313635e-006 5.3191206429753e-006 0.99930048965724]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.53191206429753 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.265956032148765 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.132978016074383 0.0806445109313634 0.0053191206429753 9.9930048965724e-005 0.053191206429753 0.806445109313635 0.053191206429753 9.9930048965724e-005 0.0053191206429753 0.0806445109313634 0.132978016074383 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.265956032148765 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.53191206429753 9.9930048965724e-005 5.3191206429753e-006 8.06445109313635e-006 5.3191206429753e-006 0.99930048965724]);
+
+i = stringmatch('FclrSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.472366556447804 0.0044762757385855 6.20655412115194e-005 0.472366556447804 0.044762757385855 6.20655412115194e-005 0.0472366556447804 0.44762757385855 0.000620655412115194 0.000472366556447804 0.44762757385855 0.00620655412115194 0.000472366556447804 0.044762757385855 0.0620655412115194 0.000472366556447804 0.0044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.472366556447804 0.0044762757385855 6.20655412115194e-005 0.472366556447804 0.044762757385855 6.20655412115194e-005 0.0472366556447804 0.44762757385855 0.000620655412115194 0.000472366556447804 0.44762757385855 0.00620655412115194 0.000472366556447804 0.044762757385855 0.0620655412115194 0.000472366556447804 0.0044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194]);
+
+i = stringmatch('BXdotSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.689655172413793 0.172413793103448 0.0546448087431694 0.00546448087431694 0.00546448087431694 0.00546448087431694 0.00574712643678161 0.00689655172413793 0.206896551724138 0.574712643678161 0.163934426229508 0.0546448087431694 0.00546448087431694 0.00546448087431694 0.00574712643678161 0.00689655172413793 0.0689655172413793 0.172413793103448 0.546448087431694 0.163934426229508 0.0546448087431694 0.00546448087431694 0.00574712643678161 0.00689655172413793 0.00689655172413793 0.0574712643678161 0.163934426229508 0.546448087431694 0.163934426229508 0.0546448087431694 0.00574712643678161 0.00689655172413793 0.00689655172413793 0.00574712643678161 0.0546448087431694 0.163934426229508 0.546448087431694 0.163934426229508 0.0574712643678161 0.00689655172413793 0.00689655172413793 0.00574712643678161 0.00546448087431694 0.0546448087431694 0.163934426229508 0.546448087431694 0.172413793103448 0.0689655172413793 0.00689655172413793 0.00574712643678161 0.00546448087431694 0.00546448087431694 0.0546448087431694 0.163934426229508 0.574712643678161 0.206896551724138 0.00689655172413793 0.00574712643678161 0.00546448087431694 0.00546448087431694 0.00546448087431694 0.0546448087431694 0.172413793103448 0.689655172413793]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.689655172413793 0.172413793103448 0.0546448087431694 0.00546448087431694 0.00546448087431694 0.00546448087431694 0.00574712643678161 0.00689655172413793 0.206896551724138 0.574712643678161 0.163934426229508 0.0546448087431694 0.00546448087431694 0.00546448087431694 0.00574712643678161 0.00689655172413793 0.0689655172413793 0.172413793103448 0.546448087431694 0.163934426229508 0.0546448087431694 0.00546448087431694 0.00574712643678161 0.00689655172413793 0.00689655172413793 0.0574712643678161 0.163934426229508 0.546448087431694 0.163934426229508 0.0546448087431694 0.00574712643678161 0.00689655172413793 0.00689655172413793 0.00574712643678161 0.0546448087431694 0.163934426229508 0.546448087431694 0.163934426229508 0.0574712643678161 0.00689655172413793 0.00689655172413793 0.00574712643678161 0.00546448087431694 0.0546448087431694 0.163934426229508 0.546448087431694 0.172413793103448 0.0689655172413793 0.00689655172413793 0.00574712643678161 0.00546448087431694 0.00546448087431694 0.0546448087431694 0.163934426229508 0.574712643678161 0.206896551724138 0.00689655172413793 0.00574712643678161 0.00546448087431694 0.00546448087431694 0.00546448087431694 0.0546448087431694 0.172413793103448 0.689655172413793]);
+
+i = stringmatch('BclrSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.472366556447804 0.0044762757385855 6.20655412115194e-005 0.472366556447804 0.044762757385855 6.20655412115194e-005 0.0472366556447804 0.44762757385855 0.000620655412115194 0.000472366556447804 0.44762757385855 0.00620655412115194 0.000472366556447804 0.044762757385855 0.0620655412115194 0.000472366556447804 0.0044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.472366556447804 0.0044762757385855 6.20655412115194e-005 0.472366556447804 0.044762757385855 6.20655412115194e-005 0.0472366556447804 0.44762757385855 0.000620655412115194 0.000472366556447804 0.44762757385855 0.00620655412115194 0.000472366556447804 0.044762757385855 0.0620655412115194 0.000472366556447804 0.0044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194 0.000472366556447804 0.00044762757385855 0.0620655412115194]);
+
+i = stringmatch('BYdotDiffSens', names);
+%bnet.CPD{i} = tabular_CPD(bnet, i, [0.53191206429753 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.265956032148765 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.132978016074383 0.0806445109313634 0.0053191206429753 9.9930048965724e-005 0.053191206429753 0.806445109313635 0.053191206429753 9.9930048965724e-005 0.0053191206429753 0.0806445109313634 0.132978016074383 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.265956032148765 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.53191206429753 9.9930048965724e-005 5.3191206429753e-006 8.06445109313635e-006 5.3191206429753e-006 0.99930048965724]);
+bnet.CPD{i+ss} = tabular_CPD(bnet, i+ss, [0.53191206429753 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.265956032148765 0.00806445109313635 0.0053191206429753 9.9930048965724e-005 0.132978016074383 0.0806445109313634 0.0053191206429753 9.9930048965724e-005 0.053191206429753 0.806445109313635 0.053191206429753 9.9930048965724e-005 0.0053191206429753 0.0806445109313634 0.132978016074383 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.265956032148765 9.9930048965724e-005 0.0053191206429753 0.00806445109313635 0.53191206429753 9.9930048965724e-005 5.3191206429753e-006 8.06445109313635e-006 5.3191206429753e-006 0.99930048965724]);
+%BIF2BNT added a bunch of zeros at the end of this cpd. Hopefully the only occurence of this bug! 0 0 0 0 0 0 0 0]);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_orig_water_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_orig_water_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,121 @@
+function dbn = mk_orig_water_dbn
+% Converted by Frank Hutter:
+% Provided in Phrog format by Xavier Boyen.
+% Manually converted into BIF.
+% Converted to BNT from BIF by Web-based bif2bnt (2004-01-30T05:28:10)
+% Manually converted into function creating DBN.
+% Manually changed the node numbering s.t. A-B-C-D-E-F-G-H from the BK paper correspond to 1-2-3-4-5-6-7-8
+
+node = struct('C_NI_12_ANT', 1, ...
+ 'CKNI_12_ANT', 2, ...
+ 'CBODD_12_ANT', 3, ...
+ 'CNOD_12_ANT', 4, ...
+ 'CBODN_12_ANT', 5, ...
+ 'CNON_12_ANT', 6, ...
+ 'CKND_12_ANT', 7, ...
+ 'CKNN_12_ANT', 8, ...
+ 'C_NI_12_OBS', 9, ...
+ 'CKNI_12_OBS', 10, ...
+ 'CBODD_12_OBS', 11, ...
+ 'CNOD_12_OBS', 12, ...
+ 'CBODN_12_OBS', 13, ...
+ 'CNON_12_OBS', 14, ...
+ 'CKND_12_OBS', 15, ...
+ 'CKNN_12_OBS', 16, ...
+ 'C_NI_12_ULT', 17, ...
+ 'CKNI_12_ULT', 18, ...
+ 'CBODD_12_ULT', 19, ...
+ 'CNOD_12_ULT', 20, ...
+ 'CBODN_12_ULT', 21, ...
+ 'CNON_12_ULT', 22, ...
+ 'CKND_12_ULT', 23, ...
+ 'CKNN_12_ULT', 24);
+
+adjacency = zeros(24);
+adjacency([node.C_NI_12_ANT], node.C_NI_12_OBS) = 1;
+adjacency([node.CKNI_12_ANT], node.CKNI_12_OBS) = 1;
+adjacency([node.CBODD_12_ANT], node.CBODD_12_OBS) = 1;
+adjacency([node.CKND_12_ANT], node.CKND_12_OBS) = 1;
+adjacency([node.CNOD_12_ANT], node.CNOD_12_OBS) = 1;
+adjacency([node.CBODN_12_ANT], node.CBODN_12_OBS) = 1;
+adjacency([node.CKNN_12_ANT], node.CKNN_12_OBS) = 1;
+adjacency([node.CNON_12_ANT], node.CNON_12_OBS) = 1;
+adjacency([node.C_NI_12_ANT], node.C_NI_12_ULT) = 1;
+adjacency([node.CKNI_12_ANT], node.CKNI_12_ULT) = 1;
+adjacency([node.CBODN_12_ANT node.CNOD_12_ANT node.CBODD_12_ANT node.CKNI_12_ANT node.C_NI_12_ANT], node.CBODD_12_ULT) = 1;
+adjacency([node.CKNN_12_ANT node.CKND_12_ANT node.CKNI_12_ANT], node.CKND_12_ULT) = 1;
+adjacency([node.CNON_12_ANT node.CNOD_12_ANT node.CBODD_12_ANT], node.CNOD_12_ULT) = 1;
+adjacency([node.CNON_12_ANT node.CBODN_12_ANT node.CBODD_12_ANT], node.CBODN_12_ULT) = 1;
+adjacency([node.CKNN_12_ANT node.CKND_12_ANT], node.CKNN_12_ULT) = 1;
+adjacency([node.CNON_12_ANT node.CKNN_12_ANT node.CBODN_12_ANT node.CNOD_12_ANT], node.CNON_12_ULT) = 1;
+
+ss = 16;
+dnodes = 1:ss;
+ant = 1:8;
+onodes = 9:16;
+ult = 17:24;
+intra = adjacency(1:ss, 1:ss);
+inter_real = adjacency(ant, ult);
+inter = zeros(ss);
+inter(ant,1:length(ult)) = inter_real;
+
+eclass1 = 1:16;
+eclass2 = [17:24 9:16];
+
+value = {{'zz3num'; 'zz4num'; 'zz5num'; 'zz6num'}, ...
+ {'zz20mgl'; 'zz30mgl'; 'zz40mgl'}, ...
+ {'zz15mgl'; 'zz20mgl'; 'zz25mgl'; 'zz30mgl'}, ...
+ {'zz05mgl'; 'zz1mgl'; 'zz2mgl'; 'zz4mgl'}, ...
+ {'zz5mgl'; 'zz10mgl'; 'zz15mgl'; 'zz20mgl'}, ...
+ {'zz2mgl'; 'zz4mgl'; 'zz6mgl'; 'zz10mgl'}, ...
+ {'zz2mgl'; 'zz4mgl'; 'zz6mgl'}, ...
+ {'zz05mgl'; 'zz1mgl'; 'zz2mgl'}, ...
+ {'zz3num'; 'zz4num'; 'zz5num'; 'zz6num'}, ...
+ {'zz20mgl'; 'zz30mgl'; 'zz40mgl'}, ...
+ {'zz15mgl'; 'zz20mgl'; 'zz25mgl'; 'zz30mgl'}, ...
+ {'zz05mgl'; 'zz1mgl'; 'zz2mgl'; 'zz4mgl'}, ...
+ {'zz5mgl'; 'zz10mgl'; 'zz15mgl'; 'zz20mgl'}, ...
+ {'zz2mgl'; 'zz4mgl'; 'zz6mgl'; 'zz10mgl'}, ...
+ {'zz2mgl'; 'zz4mgl'; 'zz6mgl'}, ...
+ {'zz05mgl'; 'zz1mgl'; 'zz2mgl'}, ...
+ {'zz3num'; 'zz4num'; 'zz5num'; 'zz6num'}, ...
+ {'zz20mgl'; 'zz30mgl'; 'zz40mgl'}, ...
+ {'zz15mgl'; 'zz20mgl'; 'zz25mgl'; 'zz30mgl'}, ...
+ {'zz05mgl'; 'zz1mgl'; 'zz2mgl'; 'zz4mgl'}, ...
+ {'zz5mgl'; 'zz10mgl'; 'zz15mgl'; 'zz20mgl'}, ...
+ {'zz2mgl'; 'zz4mgl'; 'zz6mgl'; 'zz10mgl'}, ...
+ {'zz2mgl'; 'zz4mgl'; 'zz6mgl'}, ...
+ {'zz05mgl'; 'zz1mgl'; 'zz2mgl'}};
+
+ns = zeros(1,24);
+for i=1:24
+ ns(i) = length(value{i});
+end
+
+dbn = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+
+dbn.CPD{node.C_NI_12_ANT} = tabular_CPD(dbn, node.C_NI_12_ANT, 1/ns(1) * ones(1,ns(1)));
+dbn.CPD{node.CKNI_12_ANT} = tabular_CPD(dbn, node.CKNI_12_ANT, 1/ns(2) * ones(1,ns(2)));
+dbn.CPD{node.CBODD_12_ANT} = tabular_CPD(dbn, node.CBODD_12_ANT, 1/ns(3) * ones(1,ns(3)));
+dbn.CPD{node.CNOD_12_ANT} = tabular_CPD(dbn, node.CNOD_12_ANT, 1/ns(4) * ones(1,ns(4)));
+dbn.CPD{node.CBODN_12_ANT} = tabular_CPD(dbn, node.CBODN_12_ANT, 1/ns(5) * ones(1,ns(5)));
+dbn.CPD{node.CNON_12_ANT} = tabular_CPD(dbn, node.CNON_12_ANT, 1/ns(6) * ones(1,ns(6)));
+dbn.CPD{node.CKND_12_ANT} = tabular_CPD(dbn, node.CKND_12_ANT, 1/ns(7) * ones(1,ns(7)));
+dbn.CPD{node.CKNN_12_ANT} = tabular_CPD(dbn, node.CKNN_12_ANT, 1/ns(8) * ones(1,ns(8)));
+dbn.CPD{node.C_NI_12_OBS} = tabular_CPD(dbn, node.C_NI_12_OBS, [0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7]);
+dbn.CPD{node.CKNI_12_OBS} = tabular_CPD(dbn, node.CKNI_12_OBS, [0.8 0.1 0.1 0.1 0.8 0.1 0.1 0.1 0.8]);
+dbn.CPD{node.CBODD_12_OBS} = tabular_CPD(dbn, node.CBODD_12_OBS, [0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7]);
+dbn.CPD{node.CKND_12_OBS} = tabular_CPD(dbn, node.CKND_12_OBS, [0.8 0.1 0.1 0.1 0.8 0.1 0.1 0.1 0.8]);
+dbn.CPD{node.CNOD_12_OBS} = tabular_CPD(dbn, node.CNOD_12_OBS, [0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7]);
+dbn.CPD{node.CBODN_12_OBS} = tabular_CPD(dbn, node.CBODN_12_OBS, [0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7]);
+dbn.CPD{node.CKNN_12_OBS} = tabular_CPD(dbn, node.CKNN_12_OBS, [0.8 0.1 0.1 0.1 0.8 0.1 0.1 0.1 0.8]);
+dbn.CPD{node.CNON_12_OBS} = tabular_CPD(dbn, node.CNON_12_OBS, [0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7 0.1 0.1 0.1 0.1 0.7]);
+dbn.CPD{node.C_NI_12_ULT} = tabular_CPD(dbn, node.C_NI_12_ULT, [0.5 0.2 0.1 0 0.4 0.55 0.3 0.15 0.1 0.2 0.5 0.25 0 0.05 0.1 0.6]);
+dbn.CPD{node.CKNI_12_ULT} = tabular_CPD(dbn, node.CKNI_12_ULT, [0.48 0.2 0.04 0.48 0.6 0.48 0.04 0.2 0.48]);
+dbn.CPD{node.CBODD_12_ULT} = tabular_CPD(dbn, node.CBODD_12_ULT, [1 1 0.9791 0.9473 0.9949 0.9473 0.8997 0.8521 0.9473 0.8838 0.8203 0.7568 0.0903 0.0585 0.0268 0 0.0426 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0.9951 0.9634 1 0.9634 0.9158 0.8681 0.9634 0.8999 0.8364 0.7729 0.109 0.0773 0.0455 0.0138 0.0614 0.0138 0 0 0.0138 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0.9762 1 0.9762 0.9286 0.881 0.9762 0.9127 0.8493 0.7858 0.124 0.0923 0.0605 0.0288 0.0764 0.0288 0 0 0.0288 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0.9848 1 0.9848 0.9372 0.8896 0.9848 0.9213 0.8578 0.7943 0.134 0.1023 0.0705 0.0388 0.0864 0.0388 0 0 0.0388 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0.9791 0.9473 0.9156 0.9632 0.9156 0.8679 0.8203 0.9156 0.8521 0.7886 0.7251 0.0585 0.0268 0 0 0.0109 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0.9951 0.9634 0.9316 0.9793 0.9316 0.884 0.8364 0.9316 0.8681 0.8046 0.7412 0.0773 0.0455 0.0138 0 0.0296 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0.9762 0.9445 0.9921 0.9445 0.8969 0.8493 0.9445 0.881 0.8175 0.754 0.0923 0.0605 0.0288 0 0.0446 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0.9848 0.9531 1 0.9531 0.9054 0.8578 0.9531 0.8896 0.8261 0.7626 0.1023 0.0705 0.0388 0.007 0.0546 0.007 0 0 0.007 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.9791 0.9473 0.9156 0.8838 0.9314 0.8838 0.8362 0.7886 0.8838 0.8203 0.7568 0.6933 0.0268 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.9951 0.9634 0.9316 0.8999 0.9475 0.8999 0.8523 0.8046 0.8999 0.8364 0.7729 0.7094 0.0455 0.0138 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0.9762 0.9445 0.9127 0.9604 0.9127 0.8651 0.8175 0.9127 0.8493 0.7858 0.7223 0.0605 0.0288 0 0 0.0129 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0.9848 0.9531 0.9213 0.9689 0.9213 0.8737 0.8261 0.9213 0.8578 0.7943 0.7308 0.0705 
0.0388 0.007 0 0.0229 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.9473 0.9156 0.8838 0.8521 0.8997 0.8521 0.8045 0.7568 0.8521 0.7886 0.7251 0.6616 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.9634 0.9316 0.8999 0.8681 0.9158 0.8681 0.8205 0.7729 0.8681 0.8046 0.7412 0.6777 0.0138 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.9762 0.9445 0.9127 0.881 0.9286 0.881 0.8334 0.7858 0.881 0.8175 0.754 0.6905 0.0288 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.9848 0.9531 0.9213 0.8896 0.9372 0.8896 0.842 0.7943 0.8896 0.8261 0.7626 0.6991 0.0388 0.007 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0209 0.0527 0.0051 0.0527 0.1003 0.1479 0.0527 0.1162 0.1797 0.2432 0.9097 0.9415 0.9732 0.995 0.9574 0.995 0.9474 0.8998 0.995 0.9315 0.868 0.8045 0.1362 0.1045 0.0727 0.041 0.0886 0.041 0 0 0.041 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0049 0.0366 0 0.0366 0.0842 0.1319 0.0366 0.1001 0.1636 0.2271 0.891 0.9227 0.9545 0.9862 0.9386 0.9862 0.9662 0.9185 0.9862 0.9503 0.8868 0.8233 0.157 0.1253 0.0935 0.0618 0.1094 0.0618 0.0142 0 0.0618 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0238 0 0.0238 0.0714 0.119 0.0238 0.0873 0.1507 0.2142 0.876 0.9077 0.9395 0.9712 0.9236 0.9712 0.9812 0.9335 0.9712 0.9653 0.9018 0.8383 0.1737 0.142 0.1102 0.0785 0.1261 0.0785 0.0308 0 0.0785 0.015 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0152 0 0.0152 0.0628 0.1104 0.0152 0.0787 0.1422 0.2057 0.866 0.8977 0.9295 0.9612 0.9136 0.9612 0.9912 0.9435 0.9612 0.9753 0.9118 0.8483 0.1848 0.1531 0.1213 0.0896 0.1372 0.0896 0.042 0 0.0896 0.0261 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0209 0.0527 0.0844 0.0368 0.0844 0.1321 0.1797 0.0844 0.1479 0.2114 0.2749 0.9415 0.9732 0.995 0.9633 0.9891 0.9633 0.9157 0.868 0.9633 0.8998 0.8363 0.7728 0.1045 0.0727 0.041 0.0092 0.0568 0.0092 0 0 0.0092 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0049 0.0366 0.0684 0.0207 0.0684 0.116 0.1636 0.0684 0.1319 0.1954 0.2588 0.9227 
0.9545 0.9862 0.982 0.9704 0.982 0.9344 0.8868 0.982 0.9185 0.855 0.7916 0.1253 0.0935 0.0618 0.03 0.0777 0.03 0 0 0.03 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0238 0.0555 0.0079 0.0555 0.1031 0.1507 0.0555 0.119 0.1825 0.246 0.9077 0.9395 0.9712 0.997 0.9554 0.997 0.9494 0.9018 0.997 0.9335 0.87 0.8066 0.142 0.1102 0.0785 0.0467 0.0943 0.0467 0 0 0.0467 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0152 0.0469 0 0.0469 0.0946 0.1422 0.0469 0.1104 0.1739 0.2374 0.8977 0.9295 0.9612 0.993 0.9454 0.993 0.9594 0.9118 0.993 0.9435 0.88 0.8166 0.1531 0.1213 0.0896 0.0578 0.1054 0.0578 0.0102 0 0.0578 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0209 0.0527 0.0844 0.1162 0.0686 0.1162 0.1638 0.2114 0.1162 0.1797 0.2432 0.3067 0.9732 0.995 0.9633 0.9315 0.9792 0.9315 0.8839 0.8363 0.9315 0.868 0.8045 0.7411 0.0727 0.041 0.0092 0 0.0251 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0049 0.0366 0.0684 0.1001 0.0525 0.1001 0.1477 0.1954 0.1001 0.1636 0.2271 0.2906 0.9545 0.9862 0.982 0.9503 0.9979 0.9503 0.9027 0.855 0.9503 0.8868 0.8233 0.7598 0.0935 0.0618 0.03 0 0.0459 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0238 0.0555 0.0873 0.0396 0.0873 0.1349 0.1825 0.0873 0.1507 0.2142 0.2777 0.9395 0.9712 0.997 0.9653 0.9871 0.9653 0.9177 0.87 0.9653 0.9018 0.8383 0.7748 0.1102 0.0785 0.0467 0.015 0.0626 0.015 0 0 0.015 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0152 0.0469 0.0787 0.0311 0.0787 0.1263 0.1739 0.0787 0.1422 0.2057 0.2692 0.9295 0.9612 0.993 0.9753 0.9771 0.9753 0.9277 0.88 0.9753 0.9118 0.8483 0.7848 0.1213 0.0896 0.0578 0.0261 0.0737 0.0261 0 0 0.0261 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0527 0.0844 0.1162 0.1479 0.1003 0.1479 0.1955 0.2432 0.1479 0.2114 0.2749 0.3384 0.995 0.9633 0.9315 0.8998 0.9474 0.8998 0.8522 0.8045 0.8998 0.8363 0.7728 0.7093 0.041 0.0092 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0366 0.0684 0.1001 0.1319 0.0842 0.1319 0.1795 0.2271 0.1319 0.1954 0.2588 0.3223 0.9862 0.982 0.9503 0.9185 0.9662 0.9185 0.8709 0.8233 0.9185 0.855 0.7916 0.7281 0.0618 0.03 0 0 0.0142 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0.0238 0.0555 0.0873 0.119 0.0714 0.119 0.1666 0.2142 0.119 0.1825 0.246 0.3095 0.9712 0.997 0.9653 0.9335 0.9812 0.9335 0.8859 0.8383 0.9335 0.87 0.8066 0.7431 0.0785 0.0467 0.015 0 0.0308 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0152 0.0469 0.0787 0.1104 0.0628 0.1104 0.158 0.2057 0.1104 0.1739 0.2374 0.3009 0.9612 0.993 0.9753 0.9435 0.9912 0.9435 0.8959 0.8483 0.9435 0.88 0.8166 0.7531 0.0896 0.0578 0.0261 0 0.042 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.005 0 0.005 0.0526 0.1002 0.005 0.0685 0.132 0.1955 0.8638 0.8955 0.9273 0.959 0.9114 0.959 0.9933 0.9457 0.959 0.9775 0.914 0.8505 0.1809 0.1491 0.1174 0.0856 0.1333 0.0856 0.038 0 0.0856 0.0221 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0338 0.0815 0 0.0497 0.1132 0.1767 0.843 0.8747 0.9065 0.9382 0.8906 0.9382 0.9858 0.9666 0.9382 0.9983 0.9348 0.8713 0.2034 0.1716 0.1399 0.1081 0.1558 0.1081 0.0605 0.0129 0.1081 0.0446 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0188 0.0665 0 0.0347 0.0982 0.1617 0.8263 0.858 0.8898 0.9215 0.8739 0.9215 0.9692 0.9832 0.9215 0.985 0.9515 0.888 0.2214 0.1896 0.1579 0.1261 0.1738 0.1261 0.0785 0.0309 0.1261 0.0626 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0088 0.0565 0 0.0247 0.0882 0.1517 0.8152 0.8469 0.8787 0.9104 0.8628 0.9104 0.958 0.9943 0.9104 0.9739 0.9626 0.8991 0.2334 0.2016 0.1699 0.1381 0.1858 0.1381 0.0905 0.0429 0.1381 0.0746 0.0112 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.005 0.0367 0 0.0367 0.0843 0.132 0.0367 0.1002 0.1637 0.2272 0.8955 0.9273 0.959 0.9908 0.9432 0.9908 0.9616 0.914 0.9908 0.9457 0.8822 0.8187 0.1491 0.1174 0.0856 0.0539 0.1015 0.0539 0.0063 0 0.0539 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.018 0 0.018 0.0656 0.1132 0.018 0.0815 0.145 0.2084 0.8747 0.9065 0.9382 0.97 0.9223 0.97 0.9824 0.9348 0.97 0.9666 0.9031 0.8396 0.1716 0.1399 0.1081 0.0764 0.124 0.0764 0.0288 0 0.0764 0.0129 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.003 0 0.003 0.0506 0.0982 0.003 0.0665 0.13 0.1934 0.858 0.8898 0.9215 0.9533 0.9057 0.9533 0.9991 
0.9515 0.9533 0.9832 0.9197 0.8562 0.1896 0.1579 0.1261 0.0944 0.142 0.0944 0.0468 0 0.0944 0.0309 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0406 0.0882 0 0.0565 0.12 0.1834 0.8469 0.8787 0.9104 0.9422 0.8946 0.9422 0.9898 0.9626 0.9422 0.9943 0.9308 0.8673 0.2016 0.1699 0.1381 0.1064 0.154 0.1064 0.0588 0.0112 0.1064 0.0429 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.005 0.0367 0.0685 0.0208 0.0685 0.1161 0.1637 0.0685 0.132 0.1955 0.2589 0.9273 0.959 0.9908 0.9775 0.9749 0.9775 0.9298 0.8822 0.9775 0.914 0.8505 0.787 0.1174 0.0856 0.0539 0.0221 0.0698 0.0221 0 0 0.0221 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.018 0.0497 0.0021 0.0497 0.0973 0.145 0.0497 0.1132 0.1767 0.2402 0.9065 0.9382 0.97 0.9983 0.9541 0.9983 0.9507 0.9031 0.9983 0.9348 0.8713 0.8078 0.1399 0.1081 0.0764 0.0446 0.0923 0.0446 0 0 0.0446 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.003 0.0347 0 0.0347 0.0823 0.13 0.0347 0.0982 0.1617 0.2252 0.8898 0.9215 0.9533 0.985 0.9374 0.985 0.9673 0.9197 0.985 0.9515 0.888 0.8245 0.1579 0.1261 0.0944 0.0626 0.1103 0.0626 0.015 0 0.0626 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0247 0 0.0247 0.0723 0.12 0.0247 0.0882 0.1517 0.2152 0.8787 0.9104 0.9422 0.9739 0.9263 0.9739 0.9785 0.9308 0.9739 0.9626 0.8991 0.8356 0.1699 0.1381 0.1064 0.0746 0.1223 0.0746 0.027 0 0.0746 0.0112 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.005 0.0367 0.0685 0.1002 0.0526 0.1002 0.1478 0.1955 0.1002 0.1637 0.2272 0.2907 0.959 0.9908 0.9775 0.9457 0.9933 0.9457 0.8981 0.8505 0.9457 0.8822 0.8187 0.7552 0.0856 0.0539 0.0221 0 0.038 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.018 0.0497 0.0815 0.0338 0.0815 0.1291 0.1767 0.0815 0.145 0.2084 0.2719 0.9382 0.97 0.9983 0.9666 0.9858 0.9666 0.9189 0.8713 0.9666 0.9031 0.8396 0.7761 0.1081 0.0764 0.0446 0.0129 0.0605 0.0129 0 0 0.0129 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.003 0.0347 0.0665 0.0188 0.0665 0.1141 0.1617 0.0665 0.13 0.1934 0.2569 0.9215 0.9533 0.985 0.9832 0.9692 0.9832 0.9356 0.888 0.9832 0.9197 0.8562 0.7927 0.1261 0.0944 0.0626 0.0309 0.0785 0.0309 0 0 0.0309 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0.0247 0.0565 0.0088 0.0565 0.1041 0.1517 0.0565 0.12 0.1834 0.2469 0.9104 0.9422 0.9739 0.9943 0.958 0.9943 0.9467 0.8991 0.9943 0.9308 0.8673 0.8039 0.1381 0.1064 0.0746 0.0429 0.0905 0.0429 0 0 0.0429 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0067 0.0543 0 0.0225 0.086 0.1495 0.8191 0.8509 0.8826 0.9144 0.8667 0.9144 0.962 1 0.9144 0.9779 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0334 0 0.0017 0.0652 0.1287 0.7966 0.8284 0.8601 0.8919 0.8442 0.8919 0.9395 0.9871 0.8919 0.9554 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0168 0 0 0.0485 0.112 0.7786 0.8104 0.8421 0.8739 0.8262 0.8739 0.9215 0.9691 0.8739 0.9374 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0057 0 0 0.0374 0.1009 0.7666 0.7984 0.8301 0.8619 0.8142 0.8619 0.9095 0.9571 0.8619 0.9254 0.9888 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0384 0.086 0 0.0543 0.1178 0.1813 0.8509 0.8826 0.9144 0.9461 0.8985 0.9461 0.9937 1 0.9461 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0176 0.0652 0 0.0334 0.0969 0.1604 0.8284 0.8601 0.8919 0.9236 0.876 0.9236 0.9712 1 0.9236 0.9871 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0009 0.0485 0 0.0168 0.0803 0.1438 0.8104 0.8421 0.8739 0.9056 0.858 0.9056 0.9532 1 0.9056 0.9691 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0374 0 0.0057 0.0692 0.1327 0.7984 0.8301 0.8619 0.8936 0.846 0.8936 0.9412 0.9888 0.8936 0.9571 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0225 0 0.0225 0.0702 0.1178 0.0225 0.086 0.1495 0.213 0.8826 0.9144 0.9461 0.9779 0.9302 0.9779 1 1 0.9779 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0017 0 0.0017 0.0493 0.0969 0.0017 0.0652 0.1287 0.1922 0.8601 0.8919 0.9236 0.9554 0.9077 0.9554 1 1 0.9554 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0327 0.0803 0 0.0485 0.112 0.1755 0.8421 0.8739 
0.9056 0.9374 0.8897 0.9374 0.985 1 0.9374 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0215 0.0692 0 0.0374 0.1009 0.1644 0.8301 0.8619 0.8936 0.9254 0.8777 0.9254 0.973 1 0.9254 0.9888 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0225 0.0543 0.0067 0.0543 0.1019 0.1495 0.0543 0.1178 0.1813 0.2448 0.9144 0.9461 0.9779 1 0.962 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0017 0.0334 0 0.0334 0.0811 0.1287 0.0334 0.0969 0.1604 0.2239 0.8919 0.9236 0.9554 0.9871 0.9395 0.9871 1 1 0.9871 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0168 0 0.0168 0.0644 0.112 0.0168 0.0803 0.1438 0.2073 0.8739 0.9056 0.9374 0.9691 0.9215 0.9691 1 1 0.9691 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0057 0 0.0057 0.0533 0.1009 0.0057 0.0692 0.1327 0.1961 0.8619 0.8936 0.9254 0.9571 0.9095 0.9571 1 1 0.9571 1 1 1]);
+dbn.CPD{node.CKND_12_ULT} = tabular_CPD(dbn, node.CKND_12_ULT, [0.9524 0.9127 0.873 0 0 0 0 0 0 0.9444 0.9048 0.8651 0 0 0 0 0 0 0.9286 0.8889 0.8492 0 0 0 0 0 0 0.0476 0.0873 0.127 0.9921 0.9524 0.9127 0.0317 0 0 0.0556 0.0952 0.1349 0.9841 0.9444 0.9048 0.0238 0 0 0.0714 0.1111 0.1508 0.9683 0.9286 0.8889 0.0079 0 0 0 0 0 0.0079 0.0476 0.0873 0.9683 1 1 0 0 0 0.0159 0.0556 0.0952 0.9762 1 1 0 0 0 0.0317 0.0714 0.1111 0.9921 1 1]);
+dbn.CPD{node.CNOD_12_ULT} = tabular_CPD(dbn, node.CNOD_12_ULT, [1 1 1 1 0.3675 0.4905 0.5862 0.6627 0 0 0 0 0 0 0 0 1 1 1 1 0.2405 0.3635 0.4592 0.5358 0 0 0 0 0 0 0 0 0.8893 0.9816 1 1 0.1135 0.2366 0.3322 0.4088 0 0 0 0 0 0 0 0 0.6354 0.7276 0.7994 0.8568 0 0 0.0783 0.1548 0 0 0 0 0 0 0 0 0 0 0 0 0.6325 0.5095 0.4138 0.3373 0.2972 0.3711 0.4285 0.4744 0 0 0 0 0 0 0 0 0.7595 0.6365 0.5408 0.4642 0.2338 0.3076 0.365 0.4109 0 0 0 0 0.1107 0.0184 0 0 0.8865 0.7634 0.6678 0.5912 0.1703 0.2441 0.3015 0.3474 0 0 0 0 0.3646 0.2724 0.2006 0.1432 0.9298 0.9913 0.9217 0.8452 0.0433 0.1171 0.1745 0.2204 0 0 0 0 0 0 0 0 0 0 0 0 0.7028 0.6289 0.5715 0.5256 0.2129 0.2539 0.2858 0.3113 0 0 0 0 0 0 0 0 0.7662 0.6924 0.635 0.5891 0.1812 0.2222 0.2541 0.2796 0 0 0 0 0 0 0 0 0.8297 0.7559 0.6985 0.6526 0.1494 0.1904 0.2223 0.2478 0 0 0 0 0.0702 0.0087 0 0 0.9567 0.8829 0.8255 0.7796 0.0859 0.1269 0.1588 0.1843 0 0 0 0 0 0 0 0 0 0 0 0 0.7871 0.7461 0.7142 0.6887 0 0 0 0 0 0 0 0 0 0 0 0 0.8188 0.7778 0.7459 0.7204 0 0 0 0 0 0 0 0 0 0 0 0 0.8506 0.8096 0.7777 0.7522 0 0 0 0 0 0 0 0 0 0 0 0 0.9141 0.8731 0.8412 0.8157]);
+dbn.CPD{node.CBODN_12_ULT} = tabular_CPD(dbn, node.CBODN_12_ULT, [0.9557 0.9067 0.8577 0.8087 0.0406 0 0 0 0 0 0 0 0 0 0 0 0.9561 0.9071 0.8581 0.809 0.0412 0 0 0 0 0 0 0 0 0 0 0 0.9562 0.9072 0.8582 0.8092 0.0414 0 0 0 0 0 0 0 0 0 0 0 0.9564 0.9073 0.8583 0.8093 0.0416 0 0 0 0 0 0 0 0 0 0 0 0.0443 0.0933 0.1423 0.1913 0.9594 0.9916 0.9426 0.8936 0.1152 0.0662 0.0172 0 0 0 0 0 0.0439 0.0929 0.1419 0.191 0.9588 0.9922 0.9432 0.8942 0.116 0.067 0.018 0 0 0 0 0 0.0438 0.0928 0.1418 0.1908 0.9586 0.9924 0.9434 0.8944 0.1163 0.0673 0.0183 0 0 0 0 0 0.0436 0.0927 0.1417 0.1907 0.9584 0.9926 0.9436 0.8946 0.1166 0.0676 0.0185 0 0 0 0 0 0 0 0 0 0 0.0084 0.0574 0.1064 0.8848 0.9338 0.9828 0.9682 0.1835 0.1344 0.0854 0.0364 0 0 0 0 0 0.0078 0.0568 0.1058 0.884 0.933 0.982 0.969 0.1844 0.1354 0.0863 0.0373 0 0 0 0 0 0.0076 0.0566 0.1056 0.8837 0.9327 0.9817 0.9693 0.1847 0.1357 0.0867 0.0377 0 0 0 0 0 0.0074 0.0564 0.1054 0.8834 0.9324 0.9815 0.9695 0.185 0.136 0.087 0.038 0 0 0 0 0 0 0 0 0 0 0 0.0318 0.8165 0.8656 0.9146 0.9636 0 0 0 0 0 0 0 0 0 0 0 0.031 0.8156 0.8646 0.9137 0.9627 0 0 0 0 0 0 0 0 0 0 0 0.0307 0.8153 0.8643 0.9133 0.9623 0 0 0 0 0 0 0 0 0 0 0 0.0305 0.815 0.864 0.913 0.962]);
+dbn.CPD{node.CKNN_12_ULT} = tabular_CPD(dbn, node.CKNN_12_ULT, [1 1 0.8234 0.4459 0.2499 0.0538 0 0 0 0 0 0.1766 0.5541 0.7501 0.9462 0.3627 0.2646 0.1666 0 0 0 0 0 0 0.6373 0.7354 0.8334]);
+dbn.CPD{node.CNON_12_ULT} = tabular_CPD(dbn, node.CNON_12_ULT, [0.9555 0.9432 0.9187 0.8697 0.9618 0.9495 0.925 0.876 0.9662 0.954 0.9295 0.8804 0.9696 0.9573 0.9328 0.8838 0.9102 0.8979 0.8734 0.8244 0.9164 0.9042 0.8797 0.8306 0.9209 0.9086 0.8841 0.8351 0.9243 0.912 0.8875 0.8385 0.8648 0.8526 0.8281 0.779 0.8711 0.8588 0.8343 0.7853 0.8756 0.8633 0.8388 0.7898 0.8789 0.8667 0.8422 0.7931 0.0056 0 0 0 0.0125 0.0003 0 0 0.0175 0.0052 0 0 0.0212 0.009 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0445 0.0568 0.0813 0.1303 0.0382 0.0505 0.075 0.124 0.0338 0.046 0.0705 0.1196 0.0304 0.0427 0.0672 0.1162 0.0898 0.1021 0.1266 0.1756 0.0836 0.0958 0.1203 0.1694 0.0791 0.0914 0.1159 0.1649 0.0757 0.088 0.1125 0.1615 0.1352 0.1474 0.1719 0.221 0.1289 0.1412 0.1657 0.2147 0.1244 0.1367 0.1612 0.2102 0.1211 0.1333 0.1578 0.2069 0.9944 0.9933 0.9688 0.9198 0.9875 0.9997 0.9758 0.9267 0.9825 0.9948 0.9807 0.9317 0.9788 0.991 0.9845 0.9354 0.9602 0.948 0.9235 0.8744 0.9672 0.9549 0.9304 0.8814 0.9722 0.9599 0.9354 0.8864 0.9759 0.9636 0.9391 0.8901 0.9149 0.9026 0.8781 0.8291 0.9219 0.9096 0.8851 0.8361 0.9268 0.9146 0.8901 0.841 0.9306 0.9183 0.8938 0.8448 0.055 0.0427 0.0182 0 0.0622 0.05 0.0254 0 0.0674 0.0551 0.0306 0 0.0712 0.059 0.0345 0 0.0096 0 0 0 0.0169 0.0046 0 0 0.022 0.0098 0 0 0.0259 0.0137 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0067 0.0312 0.0802 0 0 0.0242 0.0733 0 0 0.0193 0.0683 0 0 0.0155 0.0646 0.0398 0.052 0.0765 0.1256 0.0328 0.0451 0.0696 0.1186 0.0278 0.0401 0.0646 0.1136 0.0241 0.0364 0.0609 0.1099 0.0851 0.0974 0.1219 0.1709 0.0781 0.0904 0.1149 
0.1639 0.0732 0.0854 0.1099 0.159 0.0694 0.0817 0.1062 0.1552 0.945 0.9573 0.9818 0.9846 0.9378 0.95 0.9746 0.9882 0.9326 0.9449 0.9694 0.9908 0.9288 0.941 0.9655 0.9927 0.9904 0.9987 0.9864 0.9619 0.9831 0.9954 0.9901 0.9655 0.978 0.9902 0.9926 0.9681 0.9741 0.9863 0.9946 0.9701 0.9822 0.976 0.9638 0.9393 0.9858 0.9796 0.9674 0.9429 0.9884 0.9822 0.97 0.9455 0.9903 0.9842 0.9719 0.9474 0.0767 0.0706 0.0583 0.0338 0.0804 0.0743 0.062 0.0375 0.0831 0.0769 0.0647 0.0402 0.0851 0.0789 0.0667 0.0422 0.054 0.0479 0.0356 0.0111 0.0577 0.0516 0.0394 0.0149 0.0604 0.0543 0.042 0.0175 0.0624 0.0563 0.044 0.0195 0.0313 0.0252 0.013 0 0.0351 0.0289 0.0167 0 0.0377 0.0316 0.0194 0 0.0397 0.0336 0.0214 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0154 0 0 0 0.0118 0 0 0 0.0092 0 0 0 0.0073 0 0.0013 0.0136 0.0381 0 0 0.0099 0.0345 0 0 0.0074 0.0319 0 0 0.0054 0.0299 0.0178 0.024 0.0362 0.0607 0.0142 0.0204 0.0326 0.0571 0.0116 0.0178 0.03 0.0545 0.0097 0.0158 0.0281 0.0526 0.9233 0.9294 0.9417 0.9662 0.9196 0.9257 0.938 0.9625 0.9169 0.9231 0.9353 0.9598 0.9149 0.9211 0.9333 0.9578 0.946 0.9521 0.9644 0.9889 0.9423 0.9484 0.9606 0.9851 0.9396 0.9457 0.958 0.9825 0.9376 0.9437 0.956 0.9805 0.9687 0.9748 0.987 1 0.9649 0.9711 0.9833 1 0.9623 0.9684 0.9806 1 0.9603 0.9664 0.9786 1]);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_ps_from_clqs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_ps_from_clqs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function mk_ps_from_clqs(dbn, T, cliques, dir)
+
+% Draw multiple copies of the DBN,
+% and indicate the nodes in each clique by shading the nodes.
+% Generate a series of color postscript files,
+% or, if dir=[], displays them to the screen and pauses.
+
+if isempty(dir)
+ print_to_file = 0;
+else
+ print_to_file = 1;
+end
+
+if print_to_file, cd(dir), end % NOTE(review): changes the cwd and never restores it
+flip = 1;
+clf;
+[dummyx, dummyy, h] = draw_dbn(dbn.intra, dbn.inter, flip, T, -1); % h(i,2) is the patch handle for node i (used below)
+
+C = length(cliques);
+
+% nodes = [];
+% for i=1:C
+% cl = cliques{i};
+% nodes = [nodes cl(:)'];
+% end
+%nodes = unique(nodes);
+ss = length(dbn.intra);
+nodes = 1:(ss*T); % every node in the T-slice unrolled network
+
+for c=1:C
+ for i=cliques{c}
+ set(h(i,2), 'facecolor', 'r'); % shade clique members red
+ end
+ rest = mysetdiff(nodes, cliques{c});
+ for i=rest
+ set(h(i,2), 'facecolor', 'w'); % reset non-members to white
+ end
+ if print_to_file
+ print(gcf, '-depsc', sprintf('clq%d.ps', c)) % one color .ps file per clique
+ else
+ disp(['clique ' num2str(c) ' = ' num2str(cliques{c}) '; hit key for next'])
+ pause
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_uffe_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_uffe_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function bnet = mk_uffe_dbn()
+
+% Make the Uffe DBN from fig 3.4 p55 of my thesis
+
+ss = 4; % slice size: 4 nodes per time slice
+intra = zeros(ss,ss); % within-slice arcs: 1->2, 1->3, 2->3, 3->4
+intra(1,[2 3])=1;
+intra(2,3)=1;
+intra(3,4)=1;
+inter = zeros(ss,ss); % between-slice persistence arcs on nodes 1 and 4
+inter(1,1)=1;
+inter(4,4)=1;
+ns = 2*ones(1,ss); % all nodes binary
+bnet = mk_dbn(intra, inter, ns);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_water_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/mk_water_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+function bnet = mk_water_dbn(discrete_obs, obs_leaves)
+% MK_WATER_DBN
+% bnet = mk_water_dbn(discrete_obs, obs_leaves)
+%
+% If discrete_obs = 1 (default), the leaves are binary, else scalar Gaussians
+% If obs_leaves = 1 (default), all the leaves are observed, otherwise rnd nodes are observed
+%
+% This is a model of the biological processes of a water purification plant, developed
+% by Finn V. Jensen, Uffe Kjærulff, Kristian G. Olesen, and Jan Pedersen.
+% See http://www-nt.cs.berkeley.edu/home/nir/public_html/Repository/water.htm
+% See also Boyen and Koller, "Tractable Inference for Complex Stochastic Processes", UAI98
+
+if nargin < 1, discrete_obs = 1; end
+if nargin < 2, obs_leaves = 1; end % BUG FIX: was "nargin < 1", leaving obs_leaves undefined for 1-arg calls
+
+ss = 12;
+intra = zeros(ss);
+intra(1,9) = 1;
+intra(3,10) = 1;
+intra(4,11) = 1;
+intra(8,12) = 1;
+
+inter = zeros(ss);
+inter(1, [1 3]) = 1;
+inter(2, [2 3 7]) = 1;
+inter(3, [3 4 5]) = 1;
+inter(4, [3 4 6]) = 1;
+inter(5, [3 5 6]) = 1;
+inter(6, [4 5 6]) = 1;
+inter(7, [7 8]) = 1;
+inter(8, [6 7 8]) = 1;
+
+if obs_leaves
+ onodes = 9:12; % leaves
+else
+ onodes = [1 5 9:12]; % throw in some other nodes
+end
+hnodes = 1:8;
+if discrete_obs
+ ns = 2*ones(1 ,ss);
+ dnodes = 1:ss;
+else
+ ns = [2*ones(1,length(hnodes)) ones(1,ss-length(hnodes))]; % BUG FIX: was 1*ones(length(onodes)), which makes a square matrix (ones(n) is n x n), and the wrong size when obs_leaves=0; the leaves 9:12 are scalar Gaussians
+ dnodes = hnodes;
+end
+
+eclass1 = 1:12;
+eclass2 = [13:20 9:12]; % slice-2 hidden nodes get new CPDs; leaf CPDs (9:12) are tied across slices
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+if discrete_obs
+ for i=1:max(eclass2)
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+else
+ for i=hnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ for i=onodes(:)'
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+ end
+ for i=hnodes(:)'+ss
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/orig_water1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/orig_water1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+% Compare the speeds of various inference engines on the water DBN
+seed = 0;
+rand('state', seed); % fix both generators so the comparison is reproducible
+randn('state', seed);
+
+%bnet = mk_water_dbn;
+bnet = mk_orig_water_dbn;
+
+T = 3; % number of slices to unroll
+engine = {};
+%engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+%engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+%engine{end+1} = jtree_dbn_inf_engine(bnet);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T); % exact reference engine
+engine{end+1} = cbk_inf_engine(bnet, 'clusters', {[1],[2],[3],[4],[5],[6],[7],[8]}); %ff
+engine{end+1} = cbk_inf_engine(bnet, 'clusters', {[1 2],[3 4 5 6],[7 8]}); %manually designed marginally independent by BK
+engine{end+1} = cbk_inf_engine(bnet, 'clusters', {[1:5], [3:7], [7:8]}); %manually designed conditionally independent by BK
+engine{end+1} = cbk_inf_engine(bnet, 'clusters', {[1 3], [2 3 7], [3 5], [3 4 7], [6 7 8]}); %automatically found using TJTs offline
+engine{end+1} = cbk_inf_engine(bnet, 'clusters', {[1 3 5], [2 3 5 7], [3 4 7], [4 6 7], [6 7 8]}); %automatically found using TJTs offline
+engine{end+1} = cbk_inf_engine(bnet, 'clusters', {[1 3 4 5], [2 3 4 7 8], [4 6 7 8]}); %automatically found using TJTs offline
+
+% bk_inf_engine yields exactly the same results for the marginally independent cases.
+%engine{end+1} = bk_inf_engine(bnet, 'clusters', 'ff');
+%engine{end+1} = bk_inf_engine(bnet, 'clusters', { [1 2], [3 4 5 6], [7 8] });
+
+
+inf_time = cmp_inference_dbn(bnet, engine, T, 'exact', 1)
+learning_time = cmp_learning_dbn(bnet, engine, T, 'exact', 1)
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/reveal1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/reveal1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,73 @@
+% Make a DBN with the following inter-connectivity matrix
+% 1
+% / \
+% 2 3
+% \ /
+% 4
+% |
+% 5
+% where all arcs point down. In addition, there are persistence arcs from each node to itself.
+% There are no intra-slice connections.
+% Nodes have noisy-or CPDs.
+% Node 1 turns on spontaneously due to its leaky source.
+% This effect trickles down to the other nodes in the order shown.
+% All the other nodes inhibit their leaks.
+% None of the nodes inhibit the connection from themselves, so that once they are on, they remain
+% on (persistence).
+%
+% This model was used in the experiments reported in
+% - "Learning the structure of DBNs", Friedman, Murphy and Russell, UAI 1998.
+% where the structure was learned even in the presence of missing data.
+% In that paper, we used the structural EM algorithm.
+% Here, we assume full observability and tabular CPDs for the learner, so we can use a much
+% simpler learning algorithm.
+
+ss = 5;
+
+inter = eye(ss); % persistence arc from every node to itself
+inter(1,[2 3]) = 1;
+inter(2,4)=1;
+inter(3,4)=1;
+inter(4,5)=1;
+
+intra = zeros(ss); % no intra-slice connections
+ns = 2*ones(1,ss); % all nodes binary
+
+bnet = mk_dbn(intra, inter, ns);
+
+% All nodes start out off
+for i=1:ss
+ bnet.CPD{i} = tabular_CPD(bnet, i, [1.0 0.0]'); % P(off)=1 in the first slice
+end
+
+% The following params correspond to Fig 4a in the UAI 98 paper
+% The first arg is the leak inhibition prob.
+% The vector contains the inhib probs from the parents in the previous slice;
+% the last element is self, which is never inhibited.
+bnet.CPD{1+ss} = noisyor_CPD(bnet, 1+ss, 0.8, 0);
+bnet.CPD{2+ss} = noisyor_CPD(bnet, 2+ss, 1, [0.9 0]);
+bnet.CPD{3+ss} = noisyor_CPD(bnet, 3+ss, 1, [0.8 0]);
+bnet.CPD{4+ss} = noisyor_CPD(bnet, 4+ss, 1, [0.7 0.6 0]);
+bnet.CPD{5+ss} = noisyor_CPD(bnet, 5+ss, 1, [0.5 0]);
+
+
+% Generate some training data
+
+nseqs = 20;
+seqs = cell(1,nseqs);
+T = 30; % length of each training sequence
+for i=1:nseqs
+ seqs{i} = sample_dbn(bnet, T);
+end
+
+max_fan_in = 3; % let's cheat a little here
+
+% computing num. incorrect edges as a fn of the size of the training set
+%sz = [5 10 15 20];
+sz = [5 10];
+h = zeros(1, length(sz));
+for i=1:length(sz)
+ inter2 = learn_struct_dbn_reveal(seqs(1:sz(i)), ns, max_fan_in);
+ h(i) = sum(abs(inter(:)-inter2(:))); % hamming distance
+end
+h % display hamming distances (no semicolon)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/scg_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/scg_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+% Test whether stable conditional Gaussian inference works
+% Make a linear dynamical system
+% X1 -> X2
+% | |
+% v v
+% Y1 Y2
+
+intra = zeros(2);
+intra(1,2) = 1; % hidden state emits observation within each slice
+inter = zeros(2);
+inter(1,1) = 1; % hidden state persists across slices
+n = 2;
+
+X = 2; % size of hidden state
+Y = 2; % size of observable state
+
+ns = [X Y];
+bnet = mk_dbn(intra, inter, ns, 'discrete', [], 'observed', 2); % fully continuous DBN
+
+x0 = rand(X,1); % initial state mean
+V0 = eye(X); % initial state covariance
+C0 = rand(Y,X); % observation matrix
+R0 = eye(Y); % observation noise covariance
+A0 = rand(X,X); % transition matrix
+Q0 = eye(X); % transition noise covariance
+
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', x0, 'cov', V0);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(Y,1), 'cov', R0, 'weights', C0); % Y(t) = C0 X(t) + noise(R0)
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', zeros(X,1), 'cov', Q0, 'weights', A0); % X(t+1) = A0 X(t) + noise(Q0)
+
+
+T = 5; % fixed length sequences
+
+engine = {};
+engine{end+1} = kalman_inf_engine(bnet);
+engine{end+1} = scg_unrolled_dbn_inf_engine(bnet, T);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+
+inf_time = cmp_inference_dbn(bnet, engine, T, 'check_ll', 0); % log-likelihood comparison disabled
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/skf_data_assoc_gmux.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/skf_data_assoc_gmux.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,139 @@
+% We consider a switching Kalman filter of the kind studied
+% by Zoubin Ghahramani, i.e., where the switch node determines
+% which of the hidden chains we get to observe (data association).
+% e.g., for n=2 chains
+%
+% X1 -> X1
+% | X2 -> X2
+% \ |
+% v
+% Y
+% ^
+% |
+% S
+%
+% Y is a gmux (multiplexer) node, where S switches in one of the parents.
+% We differ from Zoubin by not connecting the S nodes over time (which
+% doesn't make sense for data association).
+% Indeed, we assume the S nodes are always observed.
+%
+%
+% We will track 2 objects (points) moving in the plane, as in BNT/Kalman/tracking_demo.
+% We will alternate between observing them.
+
+nobj = 2;
+N = nobj+2;
+Xs = 1:nobj;
+S = nobj+1;
+Y = nobj+2;
+
+intra = zeros(N,N);
+inter = zeros(N,N);
+intra([Xs S], Y) =1;
+for i=1:nobj
+ inter(Xs(i), Xs(i))=1;
+end
+
+Xsz = 4; % state space = (x y xdot ydot)
+Ysz = 2; % observation = (x y) position
+ns = zeros(1,N);
+ns(Xs) = Xsz;
+ns(Y) = Ysz;
+ns(S) = nobj; % BUG FIX: was "ns(S) = n", but n is undefined in this script; the switch selects one of the nobj chains (gmux_CPD below is sized by nobj)
+
+bnet = mk_dbn(intra, inter, ns, 'discrete', S, 'observed', [S Y]);
+
+% For each object, we have
+% X(t+1) = F X(t) + noise(Q)
+% Y(t) = H X(t) + noise(R)
+F = [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1];
+H = [1 0 0 0; 0 1 0 0];
+Q = 1e-3*eye(Xsz);
+%R = 1e-3*eye(Ysz);
+R = eye(Ysz);
+
+% We initialise object 1 moving to the right, and object 2 moving to the left
+% (Here, we assume nobj=2)
+init_state{1} = [10 10 1 0]';
+init_state{2} = [10 -10 -1 0]';
+
+for i=1:nobj
+ bnet.CPD{Xs(i)} = gaussian_CPD(bnet, Xs(i), 'mean', init_state{i}, 'cov', 1e-4*eye(Xsz));
+end
+bnet.CPD{S} = root_CPD(bnet, S); % always observed
+bnet.CPD{Y} = gmux_CPD(bnet, Y, 'cov', repmat(R, [1 1 nobj]), 'weights', repmat(H, [1 1 nobj]));
+% slice 2
+eclass = bnet.equiv_class;
+for i=1:nobj
+ bnet.CPD{eclass(Xs(i), 2)} = gaussian_CPD(bnet, Xs(i)+N, 'mean', zeros(Xsz,1), 'cov', Q, 'weights', F);
+end
+
+% Observe objects at random
+T = 10;
+evidence = cell(N, T);
+data_assoc = sample_discrete(normalise(ones(1,nobj)), 1, T);
+evidence(S,:) = num2cell(data_assoc);
+evidence = sample_dbn(bnet, 'evidence', evidence);
+
+% plot the data
+true_state = cell(1,nobj);
+for i=1:nobj
+ true_state{i} = cell2num(evidence(Xs(i), :)); % true_state{i}(:,t) = [x y xdot ydot]'
+end
+obs_pos = cell2num(evidence(Y,:));
+figure(1)
+clf
+hold on
+styles = {'rx', 'go', 'b+', 'k*'};
+for i=1:nobj
+ plot(true_state{i}(1,:), true_state{i}(2,:), styles{i});
+end
+for t=1:T
+ text(obs_pos(1,t), obs_pos(2,t), sprintf('%d', t));
+end
+hold off
+relax_axes(0.1)
+
+
+% Inference
+ev = cell(N,T);
+ev(bnet.observed,:) = evidence(bnet.observed, :);
+
+engines = {};
+engines{end+1} = jtree_dbn_inf_engine(bnet);
+%engines{end+1} = scg_unrolled_dbn_inf_engine(bnet, T);
+engines{end+1} = pearl_unrolled_dbn_inf_engine(bnet);
+E = length(engines);
+
+inferred_state = cell(nobj,E); % inferred_state{i,e}(:,t)
+for e=1:E
+ engines{e} = enter_evidence(engines{e}, ev);
+ for i=1:nobj
+ inferred_state{i,e} = zeros(4, T);
+ for t=1:T
+ m = marginal_nodes(engines{e}, Xs(i), t);
+ inferred_state{i,e}(:,t) = m.mu;
+ end
+ end
+end
+inferred_state{1,1}
+inferred_state{1,2}
+
+% Plot results
+figure(2)
+clf
+hold on
+styles = {'rx', 'go', 'b+', 'k*'};
+nstyles = length(styles);
+c = 1;
+for e=1:E
+ for i=1:nobj
+ plot(inferred_state{i,e}(1,:), inferred_state{i,e}(2,:), styles{mod(c-1,nstyles)+1});
+ c = c + 1;
+ end
+end
+for t=1:T
+ text(obs_pos(1,t), obs_pos(2,t), sprintf('%d', t));
+end
+hold off
+relax_axes(0.1)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/viterbi1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/viterbi1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+% Compute Viterbi path discrete HMM by different methods
+
+intra = zeros(2);
+intra(1,2) = 1; % hidden -> observed within a slice
+inter = zeros(2);
+inter(1,1) = 1; % hidden state persists across slices
+n = 2;
+
+Q = 2; % num hidden states
+O = 2; % num observable symbols
+
+ns = [Q O];
+dnodes = 1:2;
+onodes = [2]; % only the symbol node is observed
+eclass1 = [1 2];
+eclass2 = [3 2]; % transition CPD (3) is new in slice 2; obs CPD (2) is tied across slices
+bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, 'eclass1', eclass1, 'eclass2', eclass2, ...
+ 'observed', onodes);
+
+for seed=1:10
+rand('state', seed);
+prior = normalise(rand(Q,1));
+transmat = mk_stochastic(rand(Q,Q));
+obsmat = mk_stochastic(rand(Q,O));
+bnet.CPD{1} = tabular_CPD(bnet, 1, prior);
+bnet.CPD{2} = tabular_CPD(bnet, 2, obsmat);
+bnet.CPD{3} = tabular_CPD(bnet, 3, transmat);
+
+
+% Create a sequence
+T = 5;
+ev = sample_dbn(bnet, T);
+evidence = cell(2,T);
+evidence(2,:) = ev(2,:); % extract observed component
+data = cell2num(ev(2,:));
+
+%obslik = mk_dhmm_obs_lik(data, obsmat);
+obslik = multinomial_prob(data, obsmat);
+path = viterbi_path(prior, transmat, obslik); % HMM-toolbox Viterbi used as the reference answer
+
+engine = {};
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+
+mpe = find_mpe(engine{1}, evidence); % most probable explanation via BNT engine
+
+assert(isequal(cell2num(mpe(1,:)), path)) % extract values of hidden nodes
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/water1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/water1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+% Compare the speeds of various inference engines on the water DBN
+seed = 0;
+rand('state', seed); % fix both generators so the comparison is reproducible
+randn('state', seed);
+
+bnet = mk_water_dbn;
+
+T = 3; % number of slices to unroll
+engine = {};
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+
+%engine{end+1} = bk_inf_engine(bnet, 'ff', onodes);
+%engine{end+1} = bk_inf_engine(bnet, { [1 2], [3 4 5 6], [7 8] }, onodes);
+
+inf_time = cmp_inference_dbn(bnet, engine, T)
+learning_time = cmp_learning_dbn(bnet, engine, T)
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/water2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/water2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+% Compare the speeds of various inference engines on the water DBN
+seed = 0;
+rand('state', seed); % fix both generators so the comparison is reproducible
+randn('state', seed);
+
+bnet = mk_water_dbn;
+
+T = 3; % number of slices to unroll
+
+engine = {};
+engine{end+1} = smoother_engine(jtree_2TBN_inf_engine(bnet));
+engine{end+1} = smoother_engine(jtree_sparse_2TBN_inf_engine(bnet)); % sparse variant, compared against the dense engines above
+engine{end+1} = smoother_engine(hmm_2TBN_inf_engine(bnet));
+engine{end+1} = jtree_dbn_inf_engine(bnet);
+%engine{end+1} = jtree_unrolled_dbn_inf_engine(bnet, T);
+
+%engine{end+1} = bk_inf_engine(bnet, 'ff', onodes);
+%engine{end+1} = bk_inf_engine(bnet, { [1 2], [3 4 5 6], [7 8] }, onodes);
+
+inf_time = cmp_inference_dbn(bnet, engine, T)
+%learning_time = cmp_learning_dbn(bnet, engine, T)
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/amnio.m/1.1.1.1/Mon Sep 13 03:21:04 2004//
+/asia_dt1.m/1.1.1.1/Mon Jun 7 15:53:54 2004//
+/id1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/oil1.m/1.1.1.1/Mon Sep 13 02:27:08 2004//
+/pigs1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/limids
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/amnio.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/amnio.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+
+clear all
+B0 = 1; Rtriple = 2; Damnio = 3;
+B1 = 4; Ramnio = 5; Dabort = 6;
+B2 = 7; U = 8;
+
+N = 8;
+dag = zeros(N,N);
+dag(B0, [Rtriple B1 Ramnio]) = 1;
+dag(Rtriple, [Damnio Dabort]) = 1;
+dag(Damnio, [B1 Ramnio]) = 1;
+dag(B1, B2) = 1;
+dag(Ramnio, [Dabort U]) = 1;
+dag(Dabort, B2) = 1;
+dag(B2, U) = 1;
+
+
+
+ns = zeros(1,N);
+ns(B0) = 2;
+ns(B1) = 3;
+ns(B2) = 4;
+ns(Rtriple) = 2;
+ns(Ramnio) = 3;
+ns(Damnio) = 2;
+ns(Dabort) = 2;
+ns(U) = 1;
+
+limid = mk_limid(dag, ns, 'chance', [B0 B1 B2], ...
+ 'decision', [Damnio Dabort], 'utility', [U]);
+
+% states of nature
+healthy = 1; downs = 2; miscarry = 3; aborted = 4;
+% test results
+pos = 1; neg = 2; unk = 3;
+% actions
+yes = 1; no = 2;
+
+% Prior probability baby has downs syndrome
+tbl = zeros(2,1);
+p = 1/1000; % from www.downs-syndrome.org.uk figure
+p = 24/10000; % www-personal.umich.edu/~bobwolfe/560/review/Downs.pdf (for women agen 35-40)
+tbl(healthy) = 1-p;
+tbl(downs) = p;
+limid.CPD{B0} = tabular_CPD(limid, B0, tbl);
+
+% Reliability of triple screen test
+% Unreliable sensor
+% B0 -> Rtriple
+tbl = zeros(2,2); % Rtriple = pos, neg
+p = 0.5; % high false positive rate (guess)
+tbl(healthy, :) = [p 1-p];
+p = 0.6; % low detection rate (march of dimes figure)
+tbl(downs, :) = [p 1-p];
+limid.CPD{Rtriple} = tabular_CPD(limid, Rtriple, tbl);
+
+limid.CPD{Damnio} = tabular_decision_node(limid, Damnio);
+
+% Effect of amnio on baby B0,Damnio -> B1
+ % 1/200 risk of miscarry
+p = 1/200; % (march of dimes figure)
+tbl = zeros(2, 2, 3); % B1 = healthy, downs, miscarry
+tbl(healthy, no, :) = [1 0 0];
+tbl(downs, no, :) = [0 1 0];
+tbl(healthy, yes, :) = [1-p 0 p];
+tbl(downs, yes, :) = [0 1-p p];
+limid.CPD{B1} = tabular_CPD(limid, B1, tbl);
+
+% Reliability of amnio B0, Damnio -> Ramnio
+% Perfect sensor
+tbl = zeros(2,2,3); % Ramnio = pos, neg, unk
+tbl(:, no, :) = repmat([0 0 1], 2 ,1);
+tbl(healthy, yes, :) = [0 1 0];
+tbl(downs, yes, :) = [1 0 0];
+limid.CPD{Ramnio} = tabular_CPD(limid, Ramnio, tbl);
+
+limid.CPD{Dabort} = tabular_decision_node(limid, Dabort);
+
+% Effect of abortion on baby B1, Dabort -> B2
+tbl = zeros(3, 2, 4); % B2 = healthy, downs, miscarry, aborted
+tbl(:, yes, :) = repmat([0 0 0 1], 3, 1);
+tbl(healthy, no, :) = [1 0 0 0];
+tbl(downs, no, :) = [0 1 0 0];
+tbl(miscarry, no, :) = [0 0 1 0];
+limid.CPD{B2} = tabular_CPD(limid, B2, tbl);
+
+% Utility U(Ramnio, B2)
+tbl = zeros(3, 4);
+tbl(:, healthy) = 5000;
+tbl(:, downs) = -50000;
+tbl(:, miscarry) = -1000;
+tbl(:, aborted) = -1000;
+
+if 0
+%tbl(unk, miscarry) = 0; % this case is impossible
+tbl(pos, miscarry) = -1;
+tbl(neg, miscarry) = -1000;
+if 1
+ tbl(unk, aborted) = -100;
+ tbl(pos, aborted) = -1;
+ tbl(neg, aborted) = -500;
+else % pro-life utility fn
+ tbl(unk, aborted) = -500000;
+ tbl(pos, aborted) = -500000;
+ tbl(neg, aborted) = -500000;
+end
+end
+
+limid.CPD{U} = tabular_utility_node(limid, U, tbl);
+
+
+
+engine = jtree_limid_inf_engine(limid);
+[strategy, MEU] = solve_limid(engine);
+
+% Rtriple U(Damnio=1=yes) U(Damnio=2=no)
+% 1=pos 0 1
+% 2=neg 0 1
+dispcpt(strategy{Damnio})
+if isequal(strategy{Damnio}(1,:), strategy{Damnio}(2,:))
+ % Rtriple result irrelevant
+ doAmnio = argmax(strategy{Damnio}(1,:))
+else
+ doAmnio = 1;
+end
+
+% Rtriple Ramnio U(Dabort=yes=1) U(Dabort=no=2)
+% 1=pos 1=pos 1 0
+% 2=neg 1=pos 1 0
+% 1=pos 2=neg 0 1
+% 2=neg 2=neg 0 1
+% 1=pos 3=unk 0 1
+% 2=neg 3=unk 0 1
+dispcpt(strategy{Dabort})
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/asia_dt1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/asia_dt1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,100 @@
+% decision theoretic version of asia network
+% Cowell et al, p177
+% We explicitely add the no-forgetting arcs.
+
+Smoking = 1;
+VisitToAsia = 2;
+Bronchitis = 3;
+LungCancer = 4;
+TB = 5;
+Do_Xray = 6;
+TBorCancer = 7;
+Util_Xray = 8;
+Dys = 9;
+posXray = 10;
+Do_Hosp = 11;
+Util_Hosp = 12;
+
+n = 12;
+dag = zeros(n);
+dag(Smoking, [Bronchitis LungCancer]) = 1;
+dag(VisitToAsia, [TB Do_Xray Do_Hosp]) = 1;
+dag(Bronchitis, Dys) = 1;
+dag(LungCancer, [Util_Hosp TBorCancer]) = 1;
+dag(TB, [Util_Hosp TBorCancer Util_Xray]) = 1;
+dag(Do_Xray, [posXray Util_Xray Do_Hosp]) = 1;
+dag(TBorCancer, [Dys posXray]) = 1;
+dag(Dys, Do_Hosp) = 1;
+dag(posXray, Do_Hosp) = 1;
+dag(Do_Hosp, Util_Hosp) = 1;
+
+dnodes = [Do_Xray Do_Hosp];
+unodes = [Util_Xray Util_Hosp];
+cnodes = mysetdiff(1:n, [dnodes unodes]); % chance nodes
+ns = 2*ones(1,n);
+ns(unodes) = 1;
+limid = mk_limid(dag, ns, 'chance', cnodes, 'decision', dnodes, 'utility', unodes);
+
+% 1 = yes, 2 = no
+limid.CPD{VisitToAsia} = tabular_CPD(limid, VisitToAsia, [0.01 0.99]);
+limid.CPD{Bronchitis} = tabular_CPD(limid, Bronchitis, [0.6 0.3 0.4 0.7]);
+limid.CPD{Dys} = tabular_CPD(limid, Dys, [0.9 0.7 0.8 0.1 0.1 0.3 0.2 0.9]);
+limid.CPD{TBorCancer} = tabular_CPD(limid, TBorCancer, [1 1 1 0 0 0 0 1]);
+
+limid.CPD{LungCancer} = tabular_CPD(limid, LungCancer, [0.1 0.01 0.9 0.99]);
+limid.CPD{Smoking} = tabular_CPD(limid, Smoking, [0.5 0.5]);
+limid.CPD{TB} = tabular_CPD(limid, TB, [0.05 0.01 0.95 0.99]);
+limid.CPD{posXray} = tabular_CPD(limid, posXray, [0.98 0.5 0.05 0.5 0.02 0.5 0.95 0.5]);
+
+limid.CPD{Util_Hosp} = tabular_utility_node(limid, Util_Hosp, [180 120 160 15 2 4 0 40]);
+limid.CPD{Util_Xray} = tabular_utility_node(limid, Util_Xray, [0 1 10 10]);
+
+for i=dnodes(:)'
+ limid.CPD{i} = tabular_decision_node(limid, i);
+end
+
+engines = {};
+engines{end+1} = global_joint_inf_engine(limid);
+engines{end+1} = jtree_limid_inf_engine(limid);
+%engines{end+1} = belprop_inf_engine(limid);
+
+exact = [1 2];
+%approx = 3;
+approx = [];
+
+
+NE = length(engines);
+MEU = zeros(1, NE);
+niter = zeros(1, NE);
+strategy = cell(1, NE);
+
+tol = 1e-2;
+for e=1:length(engines)
+ [strategy{e}, MEU(e), niter(e)] = solve_limid(engines{e});
+end
+
+for e=exact(:)'
+ assert(approxeq(MEU(e), 47.49, tol))
+ assert(isequal(strategy{e}{Do_Xray}(:)', [1 0 0 1]))
+
+ % Check the hosptialize strategy is correct (p180)
+ % We assume the patient has not been to Asia and therefore did not have an Xray.
+ % In this case it is optimal not to hospitalize regardless of whether the patient has
+ % dyspnoea or not (and of course regardless of the value of pos_xray).
+ asia = 2;
+ do_xray = 2;
+ for dys = 1:2
+ for pos_xray = 1:2
+ assert(argmax(squeeze(strategy{e}{Do_Hosp}(asia, do_xray, dys, pos_xray, :))) == 2)
+ end
+ end
+end
+
+
+for e=approx(:)'
+ approxeq(strategy{exact(1)}{Do_Xray}, strategy{e}{Do_Xray})
+ approxeq(strategy{exact(1)}{Do_Hosp}, strategy{e}{Do_Hosp})
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/id1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/id1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+% influence diagram with no loops
+%
+% rv dec
+% \ /
+% utility
+
+N = 3;
+dag = zeros(N);
+X = 1; D = 2; U = 3;
+dag([X D], U)=1; % both the chance node and the decision feed the utility
+
+ns = zeros(1,N);
+ns(X) = 2; ns(D) = 2; ns(U) = 1; % utility nodes are scalar
+
+limid = mk_limid(dag, ns, 'chance', X, 'decision', D, 'utility', U);
+
+% use random params
+limid.CPD{X} = tabular_CPD(limid, X);
+limid.CPD{D} = tabular_decision_node(limid, D);
+limid.CPD{U} = tabular_utility_node(limid, U);
+
+%fname = '/home/cs/murphyk/matlab/Misc/loopybel.txt';
+global BNT_HOME
+fname = sprintf('%s/loopybel.txt', BNT_HOME);
+
+engines = {};
+engines{end+1} = global_joint_inf_engine(limid);
+engines{end+1} = jtree_limid_inf_engine(limid);
+%engines{end+1} = belprop_inf_engine(limid, 'max_iter', 2*N, 'filename', fname);
+engines{end+1} = belprop_inf_engine(limid, 'max_iter', 2*N);
+
+exact = [1 2]; % engines 1,2 are exact; engine 3 (belprop) is approximate
+approx = 3;
+
+E = length(engines);
+strategy = cell(1, E);
+MEU = zeros(1, E);
+for e=1:E
+ [strategy{e}, MEU(e)] = solve_limid(engines{e});
+ MEU % display running MEU values (no semicolon)
+end
+MEU
+
+for e=exact(:)'
+ assert(approxeq(strategy{exact(1)}{D}, strategy{e}{D})) % exact engines must agree
+end
+
+for e=approx(:)'
+ approxeq(strategy{exact(1)}{D}, strategy{e}{D}) % approximate engine: report agreement, don't assert
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/oil1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/oil1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,91 @@
+% oil wildcatter influence diagram in Cowell et al p172
+
+% T = test for oil?
+% UT = utility (negative cost) of testing
+% O = amount of oil = Dry, Wet or Soaking
+% R = results of test = NoStrucure, OpenStructure, ClosedStructure or NoResult
+% D = drill?
+% UD = utility of drilling
+
+% Decision sequence = T R D O
+
+T = 1; UT = 2; O = 3; R = 4; D = 5; UD = 6;
+N = 6;
+dag = zeros(N);
+dag(T, [UT R D]) = 1;
+dag(O, [R UD]) = 1;
+dag(R, D) = 1;
+dag(D, UD) = 1;
+
+ns = zeros(1,N);
+ns(O) = 3; ns(R) = 4; ns(T) = 2; ns(D) = 2; ns(UT) = 1; ns(UD) = 1;
+
+limid = mk_limid(dag, ns, 'chance', [O R], 'decision', [T D], 'utility', [UT UD]);
+
+limid.CPD{O} = tabular_CPD(limid, O, [0.5 0.3 0.2]);
+tbl = [0.6 0 0.3 0 0.1 0 0.3 0 0.4 0 0.4 0 0.1 0 0.3 0 0.5 0 0 1 0 1 0 1];
+limid.CPD{R} = tabular_CPD(limid, R, tbl);
+
+limid.CPD{UT} = tabular_utility_node(limid, UT, [-10 0]);
+limid.CPD{UD} = tabular_utility_node(limid, UD, [-70 50 200 0 0 0]);
+
+if 1
+ % start with uniform policies
+ limid.CPD{T} = tabular_decision_node(limid, T);
+ limid.CPD{D} = tabular_decision_node(limid, D);
+else
+ % hard code optimal policies
+ limid.CPD{T} = tabular_decision_node(limid, T, [1.0 0.0]);
+ a = 0.5; b = 1-a; % arbitrary value
+ tbl = myreshape([0 a 1 a 1 a a a 1 b 0 b 0 b b b], ns([T R D]));
+ limid.CPD{D} = tabular_decision_node(limid, D, tbl);
+end
+
+%fname = '/home/cs/murphyk/matlab/Misc/loopybel.txt';
+
+engines = {};
+engines{end+1} = global_joint_inf_engine(limid);
+engines{end+1} = jtree_limid_inf_engine(limid);
+%engines{end+1} = belprop_inf_engine(limid, 'max_iter', 3*N, 'filename', fname);
+
+exact = [1 2];
+%approx = 3;
+approx = [];
+
+E = length(engines);
+strategy = cell(1, E);
+MEU = zeros(1, E);
+for e=1:E
+ [strategy{e}, MEU(e)] = solve_limid(engines{e});
+ MEU
+end
+MEU
+
+for e=exact(:)'
+ assert(approxeq(MEU(e), 22.5))
+ % U(T=yes) U(T=no)
+ % 1 0
+ assert(argmax(strategy{e}{T}) == 1); % test = yes
+ t = 1; % test = yes
+ % strategy{D} T R U(D=yes=1) U(D=no=2)
+ % 1=yes 1=noS 0 1 Don't drill
+ % 2=no 1=noS 1 0
+ % 1=yes 2=opS 1 0
+ % 2=no 2=opS 1 0
+ % 1=yes 3=clS 1 0
+ % 2=no 3=clS 1 0
+ % 1=yes 4=unk 1 0
+ % 2=no 4=unk 1 0
+
+ for r=[2 3] % OpS, ClS
+ assert(argmax(squeeze(strategy{e}{D}(t,r,:))) == 1); % drill = yes
+ end
+ r = 1; % noS
+ assert(argmax(squeeze(strategy{e}{D}(t,r,:))) == 2); % drill = no
+end
+
+
+for e=approx(:)'
+ approxeq(strategy{exact(1)}{T}, strategy{e}{T})
+ approxeq(strategy{exact(1)}{D}, strategy{e}{D})
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/limids/pigs1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/limids/pigs1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,153 @@
+% pigs model from Lauritzen and Nilsson, 2001
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+% we number nodes down and to the right
+h = [1 5 9 13];
+t = [2 6 10];
+d = [3 7 11];
+u = [4 8 12 14];
+
+N = 14;
+dag = zeros(N);
+
+% causal arcs
+for i=1:3
+ dag(h(i), [t(i) h(i+1)]) = 1;
+ dag(d(i), [u(i) h(i+1)]) = 1;
+end
+dag(h(4), u(4)) = 1;
+
+% information arcs
+fig = 2;
+switch fig
+ case 0,
+ % no info arcs
+ case 1,
+ % no-forgetting policy (figure 1)
+ for i=1:3
+ dag(t(i), d(i:3)) = 1;
+ end
+ case 2,
+ % reactive policy (figure 2)
+ for i=1:3
+ dag(t(i), d(i)) = 1;
+ end
+ case 7,
+ % omniscient policy (figure 7: di has access to hidden state h(i-1))
+ dag(t(1), d(1)) = 1;
+ for i=2:3
+ %dag([h(i-1) t(i-1) d(i-1)], d(i)) = 1;
+ dag([h(i-1) d(i-1)], d(i)) = 1; % t(i-1) is redundant given h(i-1)
+ end
+end
+
+
+ns = 2*ones(1,N);
+ns(u) = 1;
+
+% parameter tying
+params = ones(1,N);
+uparam = 1;
+final_uparam = 2;
+tparam = 3;
+h1_param = 4;
+hparam = 5;
+dparams = 6:8;
+
+params(u(1:3)) = uparam;
+params(u(4)) = final_uparam;
+params(t) = tparam;
+params(h(1)) = h1_param;
+params(h(2:end)) = hparam;
+params(d) = dparams;
+
+limid = mk_limid(dag, ns, 'chance', [h t], 'decision', d, 'utility', u, 'equiv_class', params);
+
+% h = 1 means healthy, h = 2 means diseased
+% d = 1 means don't treat, d = 2 means treat
+% t = 1 means test shows healthy, t = 2 means test shows diseased
+
+if 0
+ % use random params
+ limid.CPD{final_uparam} = tabular_utility_node(limid, u(4));
+ limid.CPD{uparam} = tabular_utility_node(limid, u(1));
+ limid.CPD{tparam} = tabular_CPD(limid, t(1));
+ limid.CPD{h1_param} = tabular_CPD(limid, h(1));
+ limid.CPD{hparam} = tabular_CPD(limid, h(2));
+else
+ limid.CPD{final_uparam} = tabular_utility_node(limid, u(4), [1000 300]);
+ limid.CPD{uparam} = tabular_utility_node(limid, u(1), [0 -100]); % costs have negative utility!
+
+ % h P(t=1) P(t=2)
+ % 1 0.9 0.1
+ % 2 0.2 0.8
+ limid.CPD{tparam} = tabular_CPD(limid, t(1), [0.9 0.2 0.1 0.8]);
+
+ % P(h1)
+ limid.CPD{h1_param} = tabular_CPD(limid, h(1), [0.9 0.1]);
+
+ % hi di P(hj=1) P(hj=2), j = i+1, i=1:3
+ % 1 1 0.8 0.2
+ % 2 1 0.1 0.9
+ % 1 2 0.9 0.1
+ % 2 2 0.5 0.5
+ limid.CPD{hparam} = tabular_CPD(limid, h(2), [0.8 0.1 0.9 0.5 0.2 0.9 0.1 0.5]);
+end
+
+% Decision nodes get assigned uniform policies by default
+for i=1:3
+ limid.CPD{dparams(i)} = tabular_decision_node(limid, d(i));
+end
+
+
+fname = '/home/cs/murphyk/matlab/Misc/loopybel.txt';
+
+engines = {};
+engines{end+1} = global_joint_inf_engine(limid);
+engines{end+1} = jtree_limid_inf_engine(limid);
+%engines{end+1} = belprop_inf_engine(limid, 'max_iter', 1*N, 'filename', fname, 'tol', 1e-3);
+
+exact = [1 2];
+%approx = 3;
+approx = [];
+
+max_iter = 1;
+order = d(end:-1:1);
+%order = d(1:end);
+
+NE = length(engines);
+MEU = zeros(1, NE);
+niter = zeros(1, NE);
+strategy = cell(1, NE);
+for e=1:NE
+ [strategy{e}, MEU(e), niter(e)] = solve_limid(engines{e}, 'max_iter', max_iter, 'order', order);
+end
+MEU
+
+% check results match those in the paper (p. 22)
+direct_policy = eye(2); % treat iff test is positive
+never_policy = [1 0; 1 0]; % never treat
+tol = 1e-0; % results in paper are reported to 0dp
+for e=exact(:)'
+ switch fig
+ case 2, % reactive policy
+ assert(approxeq(MEU(e), 727, tol));
+ assert(approxeq(strategy{e}{d(1)}(:), never_policy(:)))
+ assert(approxeq(strategy{e}{d(2)}(:), direct_policy(:)))
+ assert(approxeq(strategy{e}{d(3)}(:), direct_policy(:)))
+ case 1, assert(approxeq(MEU(e), 729, tol));
+ case 7, assert(approxeq(MEU(e), 732, tol));
+ end
+end
+
+
+for e=approx(:)'
+ for i=1:3
+ approxeq(strategy{exact(1)}{d(i)}, strategy{e}{d(i)})
+ dispcpt(strategy{e}{d(i)})
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+/belprop_loop1_discrete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/belprop_loop1_gauss.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/belprop_loopy_cg.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/belprop_loopy_discrete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/belprop_loopy_gauss.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/belprop_polytree_cg.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/belprop_polytree_discrete.m/1.1.1.1/Tue Oct 1 18:21:26 2002//
+/belprop_polytree_gauss.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/bp1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/gmux1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/Belprop
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loop1_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loop1_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+% Compare different loopy belief propagation algorithms on a graph with a single loop.
+% LBP should give exact results if it converges.
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+ns = 2*ones(1,N);
+bnet = mk_bnet(dag, ns);
+for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+engines{end+1} = belprop_fg_inf_engine(bnet_to_fgraph(bnet));
+engines{end+1} = belprop_inf_engine(bnet, 'protocol', 'parallel');
+
+% belprop_fg does not support marginal_family
+% belprop_fg and belprop do not support loglik even on discrete
+[time, engines] = cmp_inference_static(bnet, engines, 'maximize', 0, 'exact', 1, 'observed', 2, ...
+ 'check_ll', 0, 'singletons_only', 1, 'check_converged', 2:4);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loop1_gauss.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loop1_gauss.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+% Compare different loopy belief propagation algorithms on a graph with a single loop.
+% LBP should give exact results if it converges.
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+ns = 2*ones(1,N);
+bnet = mk_bnet(dag, ns, 'discrete', []);
+for i=1:N
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+end
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel', 'max_iter', 20);
+%engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel', 'max_iter', 20, 'filename', ...
+% '/home/eecs/murphyk/matlab/gausspearl.txt', 'tol', 1e-5);
+
+% pearl gaussian does not compute loglik
+[time, engines] = cmp_inference_static(bnet, engines, 'maximize', 0, 'exact', 1, 'observed', [2], ...
+ 'check_ll', 0, 'singletons_only', 0, 'check_converged', [2]);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loopy_cg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loopy_cg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+% Same as cg1, except we assume all discretes are observed,
+% and use loopy for approximate inference.
+
+ns = 2*ones(1,9);
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+dnodes = [B F W];
+cnodes = mysetdiff(1:n, dnodes);
+
+%bnet = mk_incinerator_bnet(ns);
+bnet = mk_incinerator_bnet;
+
+bnet.observed = [dnodes E];
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+nengines = length(engines);
+
+
+[time, engines] = cmp_inference_static(bnet, engines, 'maximize', 0, 'check_ll', 0, ...
+ 'singletons_only', 0, 'exact', 1, 'check_converged', 2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loopy_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loopy_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+% Compare different loopy belief propagation algorithms on a graph with many loops
+
+bnet = mk_asia_bnet('orig');
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+engines{end+1} = belprop_fg_inf_engine(bnet_to_fgraph(bnet));
+engines{end+1} = belprop_inf_engine(bnet, 'protocol', 'parallel');
+
+[time, engines] = cmp_inference_static(bnet, engines, 'maximize', 0, 'exact', 1, 'observed', [1 3 5], ...
+ 'check_ll', 0, 'singletons_only', 1, 'check_converged', 2:4);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loopy_gauss.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_loopy_gauss.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+% Compare different loopy belief propagation algorithms on a graph with many loops
+% If LBP converges, the means should be exact
+
+bnet = mk_asia_bnet('gauss');
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+
+[time, engines] = cmp_inference_static(bnet, engines, 'maximize', 0, 'exact', 1, 'observed', [1 3 5], ...
+ 'check_ll', 0, 'singletons_only', 0, 'check_converged', 2);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_polytree_cg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_polytree_cg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
+% Inference on a conditional Gaussian model
+
+% Make the following polytree, where all arcs point down
+
+% 1 2
+% \ /
+% 3
+% / \
+% 4 5
+
+N = 5;
+dag = zeros(N,N);
+dag(1,3) = 1;
+dag(2,3) = 1;
+dag(3, [4 5]) = 1;
+
+ns = [2 1 2 1 2];
+
+dnodes = 1;
+%onodes = [1 5];
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', dnodes);
+
+bnet.CPD{1} = tabular_CPD(bnet, 1);
+for i=2:N
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+end
+
+engine = {};
+engine{end+1} = jtree_inf_engine(bnet);
+engine{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+
+[time, engine] = cmp_inference_static(bnet, engine, 'maximize', 0, 'check_ll', 0, ...
+ 'singletons_only', 0, 'observed', [1 3]);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_polytree_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_polytree_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+% Make the following polytree, where all arcs point down
+
+% 1 2
+% \ /
+% 3
+% / \
+% 4 5
+
+N = 5;
+dag = zeros(N,N);
+dag(1,3) = 1;
+dag(2,3) = 1;
+dag(3, [4 5]) = 1;
+
+ns = 2*ones(1,N); % binary nodes
+
+onodes = [1 5];
+
+bnet = mk_bnet(dag, ns, 'observed', onodes);
+
+if 0
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+end
+
+for i=1:N
+ %bnet.CPD{i} = tabular_CPD(bnet, i);
+ bnet.CPD{i} = noisyor_CPD(bnet, i);
+end
+
+engine = {};
+engine{end+1} = jtree_inf_engine(bnet);
+engine{end+1} = pearl_inf_engine(bnet, 'protocol', 'tree');
+engine{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+
+[err, time] = cmp_inference_static(bnet, engine, 'maximize', 0, 'check_ll', 1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_polytree_gauss.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/belprop_polytree_gauss.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+% Do the example from Satnam Alag's PhD thesis, UCB ME dept 1996 p46
+
+% Make the following polytree, where all arcs point down
+
+% 1 2
+% \ /
+% 3
+% / \
+% 4 5
+
+N = 5;
+dag = zeros(N,N);
+dag(1,3) = 1;
+dag(2,3) = 1;
+dag(3, [4 5]) = 1;
+
+ns = [2 1 2 1 2];
+
+bnet = mk_bnet(dag, ns, 'discrete', []);
+
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', [1 0]', 'cov', [4 1; 1 4]);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', 1, 'cov', 1);
+B1 = [1 2; 1 0]; B2 = [2 1]';
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', [0 0]', 'cov', [2 1; 1 1], ...
+ 'weights', [B1 B2]);
+H1 = [1 1];
+bnet.CPD{4} = gaussian_CPD(bnet, 4, 'mean', 0, 'cov', 1, 'weights', H1);
+H2 = [1 0; 1 1];
+bnet.CPD{5} = gaussian_CPD(bnet, 5, 'mean', [0 0]', 'cov', eye(2), 'weights', H2);
+
+engine = {};
+engine{end+1} = jtree_inf_engine(bnet);
+engine{end+1} = pearl_inf_engine(bnet, 'protocol', 'tree');
+engine{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+E = length(engine);
+
+if 1
+% no evidence
+evidence = cell(1,N);
+ll = zeros(1,E);
+for e=1:E
+ [engine{e}, ll(e)] = enter_evidence(engine{e}, evidence);
+ add_ev = 1;
+ m = marginal_nodes(engine{e}, 3, add_ev);
+ assert(approxeq(m.mu, [3 2]'))
+ assert(approxeq(m.Sigma, [30 9; 9 6]))
+
+ m = marginal_nodes(engine{e}, 4, add_ev);
+ assert(approxeq(m.mu, 5))
+ assert(approxeq(m.Sigma, 55))
+
+ m = marginal_nodes(engine{e}, 5, add_ev);
+ assert(approxeq(m.mu, [3 5]'))
+ assert(approxeq(m.Sigma, [31 39; 39 55]))
+end
+end
+
+if 1
+% evidence on leaf 5
+evidence = cell(1,N);
+evidence{5} = [5 5]';
+for e=1:E
+ [engine{e}, ll(e)] = enter_evidence(engine{e}, evidence);
+ add_ev = 1;
+ m = marginal_nodes(engine{e}, 3, add_ev);
+ assert(approxeq(m.mu, [4.4022 1.0217]'))
+ assert(approxeq(m.Sigma, [0.7011 -0.4891; -0.4891 1.1087]))
+
+ m = marginal_nodes(engine{e}, 4, add_ev);
+ assert(approxeq(m.mu, 5.4239))
+ assert(approxeq(m.Sigma, 1.8315))
+
+ m = marginal_nodes(engine{e}, 1, add_ev);
+ assert(approxeq(m.mu, [0.3478 1.1413]'))
+ assert(approxeq(m.Sigma, [1.8261 -0.1957; -0.1957 1.0924]))
+
+ m = marginal_nodes(engine{e}, 2, add_ev);
+ assert(approxeq(m.mu, 0.9239))
+ assert(approxeq(m.Sigma, 0.8315))
+
+ m = marginal_nodes(engine{e}, 5, add_ev);
+ assert(approxeq(m.mu, evidence{5}))
+ assert(approxeq(m.Sigma, zeros(2)))
+end
+end
+
+if 1
+% evidence on leaf 4 (non-info-state version is uninvertible)
+evidence = cell(1,N);
+evidence{4} = 10;
+for e=1:E
+ [engine{e}, ll(e)] = enter_evidence(engine{e}, evidence);
+ add_ev = 1;
+ m = marginal_nodes(engine{e}, 3, add_ev);
+ assert(approxeq(m.mu, [6.5455 3.3636]'))
+ assert(approxeq(m.Sigma, [2.3455 -1.6364; -1.6364 1.9091]))
+
+ m = marginal_nodes(engine{e}, 5, add_ev);
+ assert(approxeq(m.mu, [6.5455 9.9091]'))
+ assert(approxeq(m.Sigma, [3.3455 0.7091; 0.7091 1.9818]))
+
+ m = marginal_nodes(engine{e}, 1, add_ev);
+ assert(approxeq(m.mu, [1.9091 0.9091]'))
+ assert(approxeq(m.Sigma, [2.1818 -0.8182; -0.8182 2.1818]))
+
+ m = marginal_nodes(engine{e}, 2, add_ev);
+ assert(approxeq(m.mu, 1.2727))
+ assert(approxeq(m.Sigma, 0.8364))
+end
+end
+
+
+if 1
+% evidence on leaves 4,5 and root 2
+evidence = cell(1,N);
+evidence{2} = 0;
+evidence{4} = 10;
+evidence{5} = [5 5]';
+for e=1:E
+ [engine{e}, ll(e)] = enter_evidence(engine{e}, evidence);
+ add_ev = 1;
+ m = marginal_nodes(engine{e}, 3, add_ev);
+ assert(approxeq(m.mu, [4.9964 2.4444]'));
+ assert(approxeq(m.Sigma, [0.6738 -0.5556; -0.5556 0.8889]));
+
+ m = marginal_nodes(engine{e}, 1, add_ev);
+ assert(approxeq(m.mu, [2.2043 1.2151]'));
+ assert(approxeq(m.Sigma, [1.2903 -0.4839; -0.4839 0.8065]));
+end
+end
+
+if 1
+ [time, engine] = cmp_inference_static(bnet, engine, 'maximize', 0, 'check_ll', 0, ...
+ 'singletons_only', 0, 'observed', [1 3 5]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/bp1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/bp1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+% Compare different loopy belief propagation algorithms on a graph with a single loop.
+% LBP should give exact results if it converges.
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+N = 2;
+dag = zeros(N,N);
+dag(1,2)=1;
+ns = ones(1,N);
+bnet = mk_bnet(dag, ns, 'discrete', []);
+for i=1:N
+ %bnet.CPD{i} = gaussian_CPD(bnet, i, 'mean', 0);
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+end
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'tree');
+
+[time, engines] = cmp_inference_static(bnet, engines, 'maximize', 0, 'exact', 1:2, 'observed', [2], ...
+ 'check_ll', 0, 'singletons_only', 1, 'check_converged', []);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/gmux1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Belprop/gmux1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,90 @@
+% Test gmux.
+% The following model, where Y is a gmux node,
+% and M is set to 1, should be equivalent to X1 -> Y
+%
+% X1 Xn M
+% \ | /
+% Y
+
+n = 3;
+N = n+2;
+Xs = 1:n;
+M = n+1;
+Y = n+2;
+dag = zeros(N,N);
+dag([Xs M], Y)=1;
+
+dnodes = M;
+ns = zeros(1, N);
+sz = 2;
+ns(Xs) = sz;
+ns(M) = n;
+ns(Y) = sz;
+
+bnet = mk_bnet(dag, ns, 'discrete', M, 'observed', [M Y]);
+
+psz = ns(Xs(1));
+selfsz = ns(Y);
+
+W = randn(selfsz, psz);
+mu = randn(selfsz, 1);
+Sigma = eye(selfsz, selfsz);
+
+bnet.CPD{M} = root_CPD(bnet, M);
+for i=Xs(:)'
+ bnet.CPD{i} = gaussian_CPD(bnet, i, 'mean', zeros(psz, 1), 'cov', eye(psz, psz));
+end
+bnet.CPD{Y} = gmux_CPD(bnet, Y, 'mean', mu, 'weights', W, 'cov', Sigma);
+
+evidence = cell(1,N);
+yval = randn(selfsz, 1);
+evidence{Y} = yval;
+m = 2;
+%notm = not(m-1)+1; % only valid for n=2
+notm = mysetdiff(1:n, m);
+evidence{M} = m;
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = pearl_inf_engine(bnet, 'protocol', 'parallel');
+
+for e=1:length(engines)
+ engines{e} = enter_evidence(engines{e}, evidence);
+ mXm{e} = marginal_nodes(engines{e}, Xs(m));
+
+ % Since M=m, only Xm was updated.
+ % Hence the posterior on Xnotm should equal the prior.
+ for i=notm(:)'
+ mXnotm = marginal_nodes(engines{e}, Xs(i));
+ assert(approxeq(mXnotm.mu, zeros(psz,1)))
+ assert(approxeq(mXnotm.Sigma, eye(psz, psz)))
+ end
+end
+
+% Check that all engines give the same posterior
+for e=2:length(engines)
+ assert(approxeq(mXm{e}.mu, mXm{1}.mu))
+ assert(approxeq(mXm{e}.Sigma, mXm{1}.Sigma))
+end
+
+
+% Compute the correct posterior by building Xm -> Y
+
+N = 2;
+dag = zeros(N,N);
+dag(1, 2)=1;
+ns = [psz selfsz];
+bnet = mk_bnet(dag, ns, 'discrete', [], 'observed', 2);
+
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', zeros(psz, 1), 'cov', eye(psz, psz));
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', mu, 'cov', Sigma, 'weights', W);
+
+jengine = jtree_inf_engine(bnet);
+evidence = {[], yval};
+jengine = enter_evidence(jengine, evidence); % apply Bayes rule to invert the arc
+mX = marginal_nodes(jengine, 1);
+
+for e=1:length(engines)
+ assert(approxeq(mX.mu, mXm{e}.mu))
+ assert(approxeq(mX.Sigma, mXm{e}.Sigma))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Belief_IOhmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Belief_IOhmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+% Sigmoid Belief IOHMM
+% Here is the model
+%
+% X \ X \
+% | | | |
+% Q-|->Q-|-> ...
+% | / | /
+% Y Y
+%
+clear all;
+clc;
+rand('state',0); randn('state',0);
+X = 1; Q = 2; Y = 3;
+% intra time-slice graph
+intra=zeros(3);
+intra(X,[Q Y])=1;
+intra(Q,Y)=1;
+% inter time-slice graph
+inter=zeros(3);
+inter(Q,Q)=1;
+
+ns = [1 3 1];
+dnodes = [2];
+eclass1 = [1 2 3];
+eclass2 = [1 4 3];
+bnet = mk_dbn(intra, inter, ns, dnodes, eclass1, eclass2);
+bnet.CPD{1} = root_CPD(bnet, 1);
+% ==========================================================
+bnet.CPD{2} = softmax_CPD(bnet, 2);
+bnet.CPD{4} = softmax_CPD(bnet, 5, 'discrete', [2]);
+% ==========================================================
+bnet.CPD{3} = gaussian_CPD(bnet, 3);
+
+% make some data
+T=20;
+cases = cell(3, T);
+cases(1,:)=num2cell(round(rand(1,T)*2)+1);
+%cases(2,:)=num2cell(round(rand(1,T))+1);
+cases(3,:)=num2cell(rand(1,T));
+
+engine = bk_inf_engine(bnet, 'exact', [1 2 3]);
+
+% log lik before learning
+[engine, loglik] = enter_evidence(engine, cases);
+
+% do learning
+ev=cell(1,1);
+ev{1}=cases;
+[bnet2, LL2] = learn_params_dbn_em(engine, ev, 3);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Belief_hmdt.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Belief_hmdt.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+% Sigmoid Belief Hidden Markov Decision Tree (Jordan/Gharhamani 1996)
+%
+clear all;
+%clc;
+rand('state',0); randn('state',0);
+X = 1; Q1 = 2; Q2 = 3; Y = 4;
+% intra time-slice graph
+intra=zeros(4);
+intra(X,[Q1 Q2 Y])=1;
+intra(Q1,[Q2 Y])=1;
+intra(Q2, Y)=1;
+% inter time-slice graph
+inter=zeros(4);
+inter(Q1,Q1)=1;
+inter(Q2,Q2)=1;
+
+ns = [1 2 3 1];
+dnodes = [2 3];
+eclass1 = [1 2 3 4];
+eclass2 = [1 5 6 4];
+bnet = mk_dbn(intra, inter, ns, dnodes, eclass1, eclass2);
+
+bnet.CPD{1} = root_CPD(bnet, 1);
+% =========================================
+bnet.CPD{2} = softmax_CPD(bnet, 2);
+bnet.CPD{3} = softmax_CPD(bnet, 3, 'discrete', [2]);
+bnet.CPD{5} = softmax_CPD(bnet, 6);
+bnet.CPD{6} = softmax_CPD(bnet, 7, 'discrete', [3 6]);
+% =========================================
+bnet.CPD{4} = gaussian_CPD(bnet, 4);
+
+% make some data
+T=20;
+cases = cell(4, T);
+cases(1,:)=num2cell(round(rand(1,T)*2)+1);
+%cases(2,:)=num2cell(round(rand(1,T))+1);
+%cases(3,:)=num2cell(round(rand(1,T)*2)+1);
+cases(4,:)=num2cell(rand(1,T));
+
+engine = bk_inf_engine(bnet, 'exact', [1 2 3 4]);
+
+% log lik before learning
+[engine, loglik] = enter_evidence(engine, cases);
+
+% do learning
+ev=cell(1,1);
+ev{1}=cases;
+[bnet2, LL2] = learn_params_dbn_em(engine, ev, 10);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Belief_hme.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Belief_hme.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+% Sigmoid Belief Hierarchical Mixtures of Experts
+
+clear all
+clc
+X = 1;
+Q1 = 2;
+Q2 = 3;
+Y = 4;
+dag = zeros(4,4);
+dag(X,[Q1 Q2 Y]) = 1;
+dag(Q1, [Q2 Y]) = 1;
+dag(Q2,Y)=1;
+ns = [1 3 4 3];
+dnodes = [2 3 4];
+onodes=[1 2 3 4];
+bnet = mk_bnet(dag,ns, dnodes);
+
+rand('state',0); randn('state',0);
+
+bnet.CPD{1} = root_CPD(bnet, 1);
+bnet.CPD{2} = softmax_CPD(bnet, 2, 'max_iter', 3);
+bnet.CPD{3} = softmax_CPD(bnet, 3, 'discrete', [2], 'max_iter', 3);
+bnet.CPD{4} = softmax_CPD(bnet, 4, 'discrete', [2 3], 'max_iter', 3);
+
+T=5;
+cases = cell(4, T);
+cases(1,:)=num2cell(rand(1,T));
+%cases(2,:)=num2cell(round(rand(1,T)*2)+1);
+%cases(3,:)=num2cell(round(rand(1,T)*3)+1);
+cases(4,:)=num2cell(round(rand(1,T)*2)+1);
+
+engine = jtree_inf_engine(bnet, onodes);
+
+[engine, loglik] = enter_evidence(engine, cases);
+
+disp('learning-------------------------------------------')
+[bnet2, LL2] = learn_params_em(engine, cases, 4);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/Belief_IOhmm.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/Belief_hmdt.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/Belief_hme.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/Sigmoid_Belief.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/Brutti
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Sigmoid_Belief.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Brutti/Sigmoid_Belief.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+% Sigmoid Belief Net
+
+clear all
+clc
+dum1 = 1;
+dum2 = 2;
+dum3 = 3;
+Q1 = 4;
+Q2 = 5;
+Y = 6;
+dag = zeros(6,6);
+dag(dum1,[Q1 Y]) = 1;
+dag(dum2, Q2)=1;
+dag(dum3, [Q1 Q2])=1;
+dag(Q1,[Q2 Y]) = 1;
+dag(Q2, Y)=1;
+
+ns = [2 2 3 3 4 3];
+dnodes = [1:6];
+bnet = mk_bnet(dag,ns, dnodes);
+
+rand('state',0); randn('state',0);
+n_iter=10;
+clamped=0;
+
+bnet.CPD{1} = tabular_CPD(bnet, 1);
+bnet.CPD{2} = tabular_CPD(bnet, 2);
+bnet.CPD{3} = tabular_CPD(bnet, 3);
+% CPD = dsoftmax_CPD(bnet, self, dummy_pars, w, b, clamped, max_iter, verbose, wthresh,...
+% llthresh, approx_hess)
+bnet.CPD{4} = softmax_CPD(bnet, 4, 'discrete', [1 3]);
+bnet.CPD{5} = softmax_CPD(bnet, 5, 'discrete', [2 3]);
+bnet.CPD{6} = softmax_CPD(bnet, 6, 'discrete', [1 4]);
+
+T=5;
+cases = cell(6, T);
+cases(1,:)=num2cell(round(rand(1,T)*1)+1);
+%cases(2,:)=num2cell(round(rand(1,T)*1)+1);
+cases(3,:)=num2cell(round(rand(1,T)*2)+1);
+cases(4,:)=num2cell(round(rand(1,T)*2)+1);
+%cases(5,:)=num2cell(round(rand(1,T)*3)+1);
+cases(6,:)=num2cell(round(rand(1,T)*2)+1);
+
+engine = jtree_inf_engine(bnet);
+
+[engine, loglik] = enter_evidence(engine, cases);
+
+disp('learning-------------------------------------------')
+[bnet2, LL2, eng2] = learn_params_em(engine, cases, n_iter);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+/brainy.m/1.1.1.1/Sun Feb 22 19:43:32 2004//
+/burglar-alarm-net.lisp.txt/1.1.1.1/Thu Mar 4 22:27:48 2004//
+/burglary.m/1.1.1.1/Thu Mar 4 22:34:14 2004//
+/cg1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cg2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cmp_inference_static.m/1.2/Sat Sep 17 16:59:57 2005//
+/discrete1.m/1.1.1.1/Mon Jun 7 19:45:06 2004//
+/discrete2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/discrete3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/fa1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/gaussian1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/gaussian2.m/1.1.1.1/Thu Jun 10 01:31:02 2004//
+/gibbs_test1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/learn1.m/1.1.1.1/Sat Feb 28 17:25:40 2004//
+/lw1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mfa1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mixexp1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mixexp2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mixexp3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mog1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mpe1.m/1.1.1.1/Wed Jun 19 22:08:58 2002//
+/mpe2.m/1.1.1.1/Wed Jun 19 22:09:08 2002//
+/nodeorderExample.m/1.1.1.1/Thu Jun 10 01:42:04 2004//
+/qmr1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/qmr2.m/1.1.1.1/Thu Nov 14 01:01:46 2002//
+/sample1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/softev1.m/1.1.1.1/Wed Jun 19 23:59:18 2002//
+/softmax1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sprinkler1.m/1.1.1.1/Sun Sep 12 21:01:38 2004//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+A D/Belprop////
+A D/Brutti////
+A D/HME////
+A D/Misc////
+A D/Models////
+A D/SCG////
+A D/StructLearn////
+A D/Zoubin////
+A D/dtree////
+A D/fgraph////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+/HMEforMatlab.jpg/1.1.1.1/Wed May 29 15:59:54 2002//
+/README/1.1.1.1/Wed May 29 15:59:54 2002//
+/fhme.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/gen_data.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hme_class_plot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hme_reg_plot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hme_topobuilder.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hmemenu.m/1.1.1.1/Thu Feb 12 12:57:28 2004//
+/test_data_class.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+/test_data_class2.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+/test_data_reg.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+/train_data_class.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+/train_data_reg.mat/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/HME
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/HMEforMatlab.jpg
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/HMEforMatlab.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+This directory contains code for hierarchical mixture of experts,
+written by Pierpaolo Brutti (May 2001). Run the file hmemenu to get started.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/fhme.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/fhme.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,109 @@
+function risultati = fhme(net, nodes_info, data, n)
+%HMEFWD Forward propagation through an HME model
+%
+% Each row of the (n x class_num) matrix 'risultati' containes the estimated class posterior prob.
+%
+% ----------------------------------------------------------------------------------------------------
+% -> pierpaolo_b@hotmail.com or -> pampo@interfree.it
+% ----------------------------------------------------------------------------------------------------
+%
+ns=net.node_sizes;
+if nargin==3
+ ndata=n;
+else
+ ndata=size(data, 1);
+end
+altezza=size(ns,2);
+coeff=cell(altezza-1,1);
+for m=1:ndata
+ %- i=2 --------------------------------------------------------------------------------------
+ s=struct(net.CPD{2});
+ if nodes_info(1,2)==0,
+ mu=[]; W=[]; predict=[];
+ mu=s.mean(:,:);
+ W=s.weights(:,:,:);
+ predict=mu(:,:)+W(:,:,:)*data(m,:)';
+ coeff{1,1}=predict';
+ elseif nodes_info(1,2)==1,
+ coeff{1,1}=fglm(s.glim{1}, data(m,:));
+ else,
+ coeff{1,1}=fmlp(s.mlp{1}, data(m,:));
+ end
+ %----------------------------------------------------------------------------------------------
+ if altezza>3,
+ for i=3:altezza-1,
+ s=[]; f=[]; dpsz=[];
+ f=family(net.dag,i); f=f(2:end-1); dpsz=prod(ns(f));
+ s=struct(net.CPD{i});
+ for j=1:dpsz,
+ if nodes_info(1,i)==1,
+ coeff{i-1,1}(j,:)=coeff{i-2,1}(1,j)*fglm(s.glim{j}, data(m,:));
+ else
+ coeff{i-1,1}(j,:)=coeff{i-2,1}(1,j)*fmlp(s.mlp{j}, data(m,:));
+ end
+ end
+ app=cat(2, coeff{i-1,1}(:)); coeff{i-1,1}=app'; clear app;
+ end
+ end
+ %- i=altezza ----------------------------------------------------------------------------------
+ if altezza>2,
+ i=altezza;
+ s=[]; f=[]; dpsz=[];
+ f=family(net.dag,i); f=f(2:end-1); dpsz=prod(ns(f));
+ s=struct(net.CPD{i});
+ if nodes_info(1,i)==0,
+ mu=[]; W=[];
+ mu=s.mean(:,:);
+ W=s.weights(:,:,:);
+ end
+ for j=1:dpsz,
+ if nodes_info(1,i)==0,
+ predict=[];
+ predict=mu(:,j)+W(:,:,j)*data(m,:)';
+ coeff{i-1,1}(j,:)=coeff{i-2,1}(1,j)*predict';
+ elseif nodes_info(1,i)==1,
+ coeff{i-1,1}(j,:)=coeff{i-2,1}(1,j)*fglm(s.glim{j}, data(m,:));
+ else
+ coeff{i-1,1}(j,:)=coeff{i-2,1}(1,j)*fmlp(s.mlp{j}, data(m,:));
+ end
+ end
+ end
+ %----------------------------------------------------------------------------------------------
+ risultati(m,:)=sum(coeff{altezza-1,1},1);
+ clear coeff; coeff=cell(altezza-1,1);
+end
+return
+
+%-------------------------------------------------------------------
+
+function [y, a] = fglm(net, x)
+%GLMFWD Forward propagation through 1-layer net->GLM statistical model
+
+ndata = size(x, 1);
+
+a = x*net.w1 + ones(ndata, 1)*net.b1;
+
+nout = size(a,2);
+% Ensure that sum(exp(a), 2) does not overflow
+maxcut = log(realmax) - log(nout);
+% Ensure that exp(a) > 0
+mincut = log(realmin);
+a = min(a, maxcut);
+a = max(a, mincut);
+temp = exp(a);
+y = temp./(sum(temp, 2)*ones(1,nout));
+
+%-------------------------------------------------------------------
+
+function [y, z, a] = fmlp(net, x)
+%MLPFWD Forward propagation through 2-layer network.
+
+ndata = size(x, 1);
+
+z = tanh(x*net.w1 + ones(ndata, 1)*net.b1);
+a = z*net.w2 + ones(ndata, 1)*net.b2;
+temp = exp(a);
+nout = size(a,2);
+y = temp./(sum(temp,2)*ones(1,nout));
+
+%-------------------------------------------------------------------
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/gen_data.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/gen_data.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [data, ndata1, ndata2, targets]=gen_data(ndata, seed)
+% Generate data from three classes in 2d
+% Setting 'seed' for reproducible results
+% OUTPUT
+% data : data set
+% ndata1, ndata2: separator
+
+if nargin<1,
+ error('Missing data size');
+end
+
+input_dim = 2;
+num_classes = 3;
+
+if nargin==2,
+ % Fix seeds for reproducible results
+ randn('state', seed);
+ rand('state', seed);
+end
+
+% Generate mixture of three Gaussians in two dimensional space
+data = randn(ndata, input_dim);
+targets = zeros(ndata, 3);
+
+% Priors for the clusters
+prior(1) = 0.4;
+prior(2) = 0.3;
+prior(3) = 0.3;
+
+% Cluster centres
+c = [2.0, 2.0; 0.0, 0.0; 1, -1];
+
+ndata1 = round(prior(1)*ndata);
+ndata2 = round((prior(1) + prior(2))*ndata);
+% Put first cluster at (2, 2)
+data(1:ndata1, 1) = data(1:ndata1, 1) * 0.5 + c(1,1);
+data(1:ndata1, 2) = data(1:ndata1, 2) * 0.5 + c(1,2);
+targets(1:ndata1, 1) = 1;
+
+% Leave second cluster at (0,0)
+data((ndata1 + 1):ndata2, :) = data((ndata1 + 1):ndata2, :);
+targets((ndata1+1):ndata2, 2) = 1;
+
+data((ndata2+1):ndata, 1) = data((ndata2+1):ndata,1) *0.6 + c(3, 1);
+data((ndata2+1):ndata, 2) = data((ndata2+1):ndata,2) *0.6 + c(3, 2);
+targets((ndata2+1):ndata, 3) = 1;
+
+if 0
+ ndata = 1;
+ data = x;
+ targets = [1 0 0];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hme_class_plot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hme_class_plot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,142 @@
+function fh=hme_class_plot(net, nodes_info, train_data, test_data)
+%
+% Use this function ONLY when the input dimension is 2
+% and the problem is a classification one.
+% We assume that each row of 'train_data' & 'test_data' is an example.
+%
+%------Line Spec------------------------------------------------------------------------
+%
+% LineWidth - specifies the width (in points) of the line
+% MarkerEdgeColor - specifies the color of the marker or the edge color
+% forfilled markers (circle, square, diamond, pentagram, hexagram, and the
+% four triangles).
+% MarkerFaceColor - specifies the color of the face of filled markers.
+% MarkerSize - specifies the size of the marker in points.
+%
+% Example
+% -------
+% plot(t,sin(2*t),'-mo',...
+% 'LineWidth',2,...
+% 'MarkerEdgeColor','k',... % 'k'=black
+% 'MarkerFaceColor',[.49 1 .63],... % RGB color
+% 'MarkerSize',12)
+%----------------------------------------------------------------------------------------
+
+class_num=nodes_info(2,end);
+mn_x = round(min(train_data(:,1))); mx_x = round(max(train_data(:,1)));
+mn_y = round(min(train_data(:,2))); mx_y = round(max(train_data(:,2)));
+if nargin==4,
+ mn_x = round(min([train_data(:,1); test_data(:,1)]));
+ mx_x = round(max([train_data(:,1); test_data(:,1)]));
+ mn_y = round(min([train_data(:,2); test_data(:,2)]));
+ mx_y = round(max([train_data(:,1); test_data(:,2)]));
+end
+x = mn_x(1)-1:0.2:mx_x(1)+1;
+y = mn_y(1)-1:0.2:mx_y(1)+1;
+[X, Y] = meshgrid(x,y);
+X = X(:);
+Y = Y(:);
+num_g=size(X,1);
+griglia = [X Y];
+rand('state',1);
+if class_num<=6,
+ colors=['r'; 'g'; 'b'; 'c'; 'm'; 'y'];
+else
+ colors=rand(class_num, 3); % each row is an RGB color
+end
+fh = figure('Name','Data & decision boundaries', 'MenuBar', 'none', 'NumberTitle', 'off');
+ms=5; % Marker Size
+if nargin==4,
+% ms=4; % Marker Size
+ subplot(1,2,1);
+end
+% Plot of train_set -------------------------------------------------------------------------
+axis([mn_x-1 mx_x+1 mn_y-1 mx_y+1]);
+set(gca, 'Box', 'on');
+c_max_train = max(train_data(:,3));
+hold on
+for m=1:c_max_train,
+ app_x=train_data(:,1);
+ app_y=train_data(:,2);
+ thisX=app_x(train_data(:,3)==m);
+ thisY=app_y(train_data(:,3)==m);
+ if class_num<=6,
+ str_col=[];
+ str_col=['o', colors(m,:)];
+ plot(thisX, thisY, str_col, 'MarkerSize', ms);
+ else
+ plot(thisX, thisY, 'o',...
+ 'LineWidth', 1,...
+ 'MarkerEdgeColor', colors(m,:), 'MarkerSize', ms)
+ end
+end
+%---hmefwd_generale(net,data,ndata)-----------------------------------------------------------
+Z=fhme(net, nodes_info, griglia, num_g); % forward propagation trougth the HME
+%---------------------------------------------------------------------------------------------
+[foo , class] = max(Z'); % 0/1 loss function => we assume that the true class is the one with the
+ % maximum posterior prob.
+class = class';
+for m = 1:class_num,
+ thisX=[]; thisY=[];
+ thisX = X(class == m);
+ thisY = Y(class == m);
+ if class_num<=6,
+ str_col=[];
+ str_col=['d', colors(m,:)];
+ h=plot(thisX, thisY, str_col);
+ else
+ h = plot(thisX, thisY, 'd',...
+ 'MarkerEdgeColor',colors(m,:),...
+ 'MarkerFaceColor','w');
+ end
+ set(h, 'MarkerSize', 4);
+end
+title('Training set and Decision Boundaries (0/1 loss)')
+hold off
+
+% Plot of test_set --------------------------------------------------------------------------
+if nargin==4,
+ subplot(1,2,2);
+ axis([mn_x-1 mx_x+1 mn_y-1 mx_y+1]);
+ set(gca, 'Box', 'on');
+ hold on
+ if size(test_data,2)==3, % we know the classification of the test set examples
+ c_max_test = max(test_data(:,3));
+ for m=1:c_max_test,
+ app_x=test_data(:,1);
+ app_y=test_data(:,2);
+ thisX=app_x(test_data(:,3)==m);
+ thisY=app_y(test_data(:,3)==m);
+ if class_num<=6,
+ str_col=[];
+ str_col=['o', colors(m,:)];
+ plot(thisX, thisY, str_col, 'MarkerSize', ms);
+ else
+ plot(thisX, thisY, 'o',...
+ 'LineWidth', 1,...
+ 'MarkerEdgeColor', colors(m,:),...
+ 'MarkerSize',ms);
+ end
+ end
+ else
+ plot(test_data(:,1), test_data(:,2), 'ko',...
+ 'MarkerSize', ms);
+ end
+ for m = 1:class_num,
+ thisX=[]; thisY=[];
+ thisX = X(class == m);
+ thisY = Y(class == m);
+ if class_num<=6,
+ str_col=[];
+ str_col=['d', colors(m,:)];
+ h=plot(thisX, thisY, str_col);
+ else
+ h = plot(thisX, thisY, 'd',...
+ 'MarkerEdgeColor', colors(m,:),...
+ 'MarkerFaceColor','w');
+ end
+ set(h, 'MarkerSize', 4);
+ end
+ title('Test set and Decision Boundaries (0/1 loss)')
+ hold off
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hme_reg_plot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hme_reg_plot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function fh=hme_reg_plot(net, nodes_info, train_data, test_data)
+%
+% Use this function ONLY when the input dimension is 1
+% and the problem is a regression one.
+% We assume that each row of 'train_data' & 'test_data' is an example.
+%
+% ----------------------------------------------------------------------------------------------------
+% -> pierpaolo_b@hotmail.com or -> pampo@interfree.it
+% ----------------------------------------------------------------------------------------------------
+
+fh=figure('Name','HME based regression', 'MenuBar', 'none', 'NumberTitle', 'off');
+
+mn_x_train = round(min(train_data(:,1)));
+mx_x_train = round(max(train_data(:,1)));
+x_train = mn_x_train(1):0.01:mx_x_train(1);
+Z_train=fhme(net, nodes_info, x_train',size(x_train,2)); % forward propagation trougth the HME
+
+if nargin==4,
+ subplot(2,1,1);
+ mn_x_test = round(min(test_data(:,1)));
+ mx_x_test = round(max(test_data(:,1)));
+ x_test = mn_x_test(1):0.01:mx_x_test(1);
+ Z_test=fhme(net, nodes_info, x_test',size(x_test,2)); % forward propagation trougth the HME
+end
+
+hold on;
+set(gca, 'Box', 'on');
+plot(x_train', Z_train, 'r');
+plot(train_data(:,1),train_data(:,2),'+k');
+title('Training set and prediction');
+hold off
+
+if nargin==4,
+ subplot(2,1,2);
+ hold on;
+ set(gca, 'Box', 'on');
+ plot(x_train', Z_train, 'r');
+ if size(test_data,2)==2,
+ plot(test_data(:,1),test_data(:,2),'+k');
+ end
+ title('Test set and prediction');
+ hold off
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hme_topobuilder.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hme_topobuilder.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+function [bnet, onodes]=hme_topobuilder(nodes_info);
+%
+% HME topology builder
+%
+% ----------------------------------------------------------------------------------------------------
+% -> pierpaolo_b@hotmail.com or -> pampo@interfree.it
+% ----------------------------------------------------------------------------------------------------
+
+nodes_num=size(nodes_info,2);
+dag = zeros(nodes_num);
+list=[1:nodes_num];
+for i=1:(nodes_num-1)
+ app=[];
+ app=list((i+1):end);
+ dag(i,app) = 1;
+end
+onodes = [1 nodes_num];
+dnodes = list(2:end-1);
+if nodes_info(1,end)>0,
+ dnodes=[dnodes nodes_num];
+end
+ns = nodes_info(2,:);
+
+bnet = mk_bnet(dag, ns, dnodes);
+clamped = 0;
+
+bnet.CPD{1} = root_CPD(bnet, 1);
+
+rand('state', 50);
+randn('state', 50);
+
+for i=2:nodes_num,
+ if (nodes_info(1,i)==0)&(nodes_info(4,i)==1),
+ bnet.CPD{i} = gaussian_CPD(bnet, i, [], [], [], 'full');
+ elseif (nodes_info(1,i)==0)&(nodes_info(4,i)==2),
+ bnet.CPD{i} = gaussian_CPD(bnet, i, [], [], [], 'diag');
+ elseif (nodes_info(1,i)==0)&(nodes_info(4,i)==3),
+ bnet.CPD{i} = gaussian_CPD(bnet, i, [], [], [], 'full', 'tied');
+ elseif (nodes_info(1,i)==0)&(nodes_info(4,i)==4),
+ bnet.CPD{i} = gaussian_CPD(bnet, i, [], [], [], 'diag', 'tied');
+ elseif nodes_info(1,i)==1,
+ %bnet.CPD{i} = dsoftmax_CPD(bnet, i, [], [], clamped, nodes_info(4,i));
+ bnet.CPD{i} = softmax_CPD(bnet, i, 'clamped', clamped, 'max_iter', nodes_info(4,i));
+ else
+ bnet.CPD{i} = mlp_CPD(bnet, i, nodes_info(3,i), [], [], [], [], clamped, nodes_info(4,i));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hmemenu.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/hmemenu.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,552 @@
+% dataset -> (1=>user data) or (2=>toy example)
+% type -> (1=> Regression model) or (2=>Classification model)
+% num_glevel -> number of hidden nodes in the net (gating levels)
+% num_exp -> number of experts in the net
+% branch_fact -> dimension of the hidden nodes in the net
+% cov_dim -> root node dimension
+% res_dim -> output node dimension
+% nodes_info -> 4 x num_glevel+2 matrix that contain all the info about the nodes:
+% nodes_info(1,:) = nodes type: (0=>gaussian)or(1=>softmax)or(2=>mlp)
+% nodes_info(2,:) = nodes size: [cov_dim num_glevel x branch_fact res_dim]
+% nodes_info(3,:) = hidden units number (for mlp nodes)
+% |- optimizer iteration number (for softmax & mlp CPD)
+% nodes_info(4,:) =|- covariance type (for gaussian CPD)->
+% | (1=>Full)or(2=>Diagonal)or(3=>Full&Tied)or(4=>Diagonal&Tied)
+% fh1 -> Figure: data & decizion boundaries; fh2 -> confusion matrix; fh3 -> LL trace
+% test_data -> test data matrix
+% train_data -> training data matrix
+% ntrain -> size(train_data,2)
+% ntest -> size(test_data,2)
+% cases -> (cell array) training data formatted for the learning engine
+% bnet -> bayesian net before learning
+% bnet2 -> bayesian net after learning
+% ll -> log-likelihood before learning
+% LL2 -> log-likelihood trace
+% onodes -> obs nodes in bnet & bnet2
+% max_em_iter -> maximum number of interations of the EM algorithm
+% train_result -> prediction on the training set (as test_result)
+%
+% IMPORTANT: CHECK the loading path (lines 64 & 364)
+% ----------------------------------------------------------------------------------------------------
+% -> pierpaolo_b@hotmail.com or -> pampo@interfree.it
+% ----------------------------------------------------------------------------------------------------
+
+error('this no longer works with the latest version of BNT')
+
+clear all;
+clc;
+disp('---------------------------------------------------');
+disp(' Hierarchical Mixtures of Experts models builder ');
+disp('---------------------------------------------------');
+disp(' ')
+disp(' Using this script you can build both an HME model')
+disp('as in [Wat94] and [Jor94] i.e. with ''softmax'' gating')
+disp('nodes and ''gaussian'' ( for regression ) or ''softmax''')
+disp('( for classification ) expert node, and its variants')
+disp('called ''gated nets'' where we use ''mlp'' models in')
+disp('place of a number of ''softmax'' ones [Mor98], [Wei95].')
+disp(' You can decide to train and test the model on your')
+disp('datasets or to evaluate its performance on a toy')
+disp('example.')
+disp(' ')
+disp('Reference')
+disp('[Mor98] P. Moerland (1998):')
+disp(' Localized mixtures of experts. (http://www.idiap.ch/~perry/)')
+disp('[Jor94] M.I. Jordan, R.A. Jacobs (1994):')
+disp(' HME and the EM algorithm. (http://www.cs.berkeley.edu/~jordan/)')
+disp('[Wat94] S.R. Waterhouse, A.J. Robinson (1994):')
+disp(' Classification using HME. (http://www.oigeeza.com/steve/)')
+disp('[Wei95] A.S. Weigend, M. Mangeas (1995):')
+disp(' Nonlinear gated experts for time series.')
+disp(' ')
+
+if 0
+disp('(See the figure)')
+pause(5);
+%%%%%WARNING!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+im_path=which('HMEforMatlab.jpg');
+fig=imread(im_path, 'jpg');
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+figure('Units','pixels','MenuBar','none','NumberTitle','off', 'Name', 'HME model');
+image(fig);
+axis image;
+axis off;
+clear fig;
+set(gca,'Position',[0 0 1 1])
+disp('(Press any key to continue)')
+pause
+end
+
+clc
+disp('---------------------------------------------------');
+disp(' Specify the Architecture ');
+disp('---------------------------------------------------');
+disp(' ');
+disp('What kind of model do you need?')
+disp(' ')
+disp('1) Regression ')
+disp('2) Classification')
+disp(' ')
+type=input('1 or 2?: ');
+if (isempty(type)|(~ismember(type,[1 2]))), error('Invalid value'); end
+clc
+disp('----------------------------------------------------');
+disp(' Specify the Architecture ');
+disp('----------------------------------------------------');
+disp(' ')
+disp('Now you have to set the number of experts and gating')
+disp('levels in the net. This script builds only balanced')
+disp('hierarchy with the same branching factor (>1)at each')
+disp('(gating) level. So remember that: ')
+disp(' ')
+disp(' num_exp = branch_fact^num_glevel ')
+disp(' ')
+disp('with branch_fact >=2.')
+disp('You can also set to zeros the number of gating level')
+disp('in order to obtain a classical GLM model. ')
+disp(' ')
+disp('----------------------------------------------------');
+disp(' ')
+num_glevel=input('Insert the number of gating levels {0,...,20}: ');
+if (isempty(num_glevel)|(~ismember(num_glevel,[0:20]))), error('Invalid value'); end
+nodes_info=zeros(4,num_glevel+2);
+if num_glevel>0, %------------------------------------------------------------------------------------
+ for i=2:num_glevel+1,
+ clc
+ disp('----------------------------------------------------');
+ disp(' Specify the Architecture ');
+ disp('----------------------------------------------------');
+ disp(' ')
+ disp(['-> Gating network ', num2str(i-1), ' is a: '])
+ disp(' ')
+ disp(' 1) Softmax model');
+ disp(' 2) Two layer perceptron model')
+ disp(' ')
+ nodes_info(1,i)=input('1 or 2?: ');
+ if (isempty(nodes_info(1,i))|(~ismember(nodes_info(1,i),[1 2]))), error('Invalid value'); end
+ disp(' ')
+ if nodes_info(1,i)==2,
+ nodes_info(3,i)=input('Insert the number of units in the hidden layer: ');
+ if (isempty(nodes_info(3,i))|(floor(nodes_info(3,i))~=nodes_info(3,i))|(nodes_info(3,i)<=0)),
+ error(['Invalid value: ', num2str(nodes_info(3,i)), ' is not a positive integer!']);
+ end
+ disp(' ')
+ end
+ nodes_info(4,i)=input('Insert the optimizer iteration number: ');
+ if (isempty(nodes_info(4,i))|(floor(nodes_info(4,i))~=nodes_info(4,i))|(nodes_info(4,i)<=0)),
+ error(['Invalid value: ', num2str(nodes_info(4,i)), ' is not a positive integer!']);
+ end
+ end
+ clc
+ disp('---------------------------------------------------------');
+ disp(' Specify the Architecture ');
+ disp('---------------------------------------------------------');
+ disp(' ')
+ disp('Now you have to set the number of experts in the network');
+ disp('The value will be adjusted in order to obtain a hierarchy');
+ disp('as said above.')
+ disp(' ');
+ num_exp=input(['Insert the approximative number of experts (>=', num2str(2^num_glevel), '): ']);
+ if (isempty(num_exp)|(num_exp<=0)|(num_exp<2^num_glevel)),
+ error('Invalid value');
+ end
+ app1=0; base=2;
+ while app1=(2^num_glevel)&(abs(app2-num_exp)0-------------------------------------------------------------------------
+
+if type==2,
+ disp(['-> Expert node is a: '])
+ disp(' ')
+ disp(' 1) Softmax model');
+ disp(' 2) Two layer perceptron model')
+ disp(' ')
+ nodes_info(1,end)=input('1 or 2?: ');
+ if (isempty(nodes_info(1,end))|(~ismember(nodes_info(1,end),[1 2]))),
+ error('Invalid value');
+ end
+ disp(' ')
+ if nodes_info(1,end)==2,
+ nodes_info(3,end)=input('Insert the number of units in the hidden layer: ');
+ if (isempty(nodes_info(3,end))|(floor(nodes_info(3,end))~=nodes_info(3,end))|(nodes_info(3,end)<=0)),
+ error(['Invalid value: ', num2str(nodes_info(3,end)), ' is not a positive integer!']);
+ end
+ disp(' ')
+ end
+ nodes_info(4,end)=input('Insert the optimizer iteration number: ');
+ if (isempty(nodes_info(4,end))|(floor(nodes_info(4,end))~=nodes_info(4,end))|(nodes_info(4,end)<=0)),
+ error(['Invalid value: ', num2str(nodes_info(4,end)), ' is not a positive integer!']);
+ end
+elseif type==1,
+ disp('What kind of covariance matrix structure do you want?')
+ disp(' ')
+ disp(' 1) Full');
+ disp(' 2) Diagonal')
+ disp(' 3) Full & Tied');
+ disp(' 4) Diagonal & Tied')
+
+ disp(' ')
+ nodes_info(4,end)=input('1, 2, 3 or 4?: ');
+ if (isempty(nodes_info(4,end))|(~ismember(nodes_info(4,end),[1 2 3 4]))),
+ error('Invalid value');
+ end
+end
+clc
+disp('----------------------------------------------------');
+disp(' Specify the Input ');
+disp('----------------------------------------------------');
+disp(' ')
+disp('Do you want to...')
+disp(' ')
+disp('1) ...use your own dataset?')
+disp('2) ...apply the model on a toy example?')
+disp(' ')
+dataset=input('1 or 2?: ');
+if (isempty(dataset)|(~ismember(dataset,[1 2]))), error('Invalid value'); end
+if dataset==1,
+ if type==1,
+ clc
+ disp('-------------------------------------------------------');
+ disp(' Specify the Input - Regression problem ');
+ disp('-------------------------------------------------------');
+ disp(' ')
+ disp('Be sure that each row of your data matrix is an example');
+ disp('with the covariate values that precede the respond ones')
+ disp(' ')
+ disp('-------------------------------------------------------');
+ disp(' ')
+ cov_dim=input('Insert the covariate space dimension: ');
+ if (isempty(cov_dim)|(floor(cov_dim)~=cov_dim)|(cov_dim<=0)),
+ error(['Invalid value: ', num2str(cov_dim), ' is not a positive integer!']);
+ end
+ disp(' ')
+ res_dim=input('Insert the dimension of the respond variable: ');
+ if (isempty(res_dim)|(floor(res_dim)~=res_dim)|(res_dim<=0)),
+ error(['Invalid value: ', num2str(res_dim), ' is not a positive integer!']);
+ end
+ disp(' ');
+ elseif type==2
+ clc
+ disp('-------------------------------------------------------');
+ disp(' Specify the Input - Classification problem ');
+ disp('-------------------------------------------------------');
+ disp(' ')
+ disp('Be sure that each row of your data matrix is an example');
+ disp('with the covariate values that precede the class labels');
+ disp('(integer value >=1). ');
+ disp(' ')
+ disp('-------------------------------------------------------');
+ disp(' ')
+ cov_dim=input('Insert the covariate space dimension: ');
+ if (isempty(cov_dim)|(floor(cov_dim)~=cov_dim)|(cov_dim<=0)),
+ error(['Invalid value: ', num2str(cov_dim), ' is not a positive integer!']);
+ end
+ disp(' ')
+ res_dim=input('Insert the number of classes: ');
+ if (isempty(res_dim)|(floor(res_dim)~=res_dim)|(res_dim<=0)),
+ error(['Invalid value: ', num2str(res_dim), ' is not a positive integer!']);
+ end
+ disp(' ')
+ end
+ % ------------------------------------------------------------------------------------------------
+ % Loading training data --------------------------------------------------------------------------
+ % ------------------------------------------------------------------------------------------------
+ train_path=input('Insert the complete (with extension) path of the training data file:\n >> ','s');
+ if isempty(train_path), error('You must specify a data set for training!'); end
+ if ~isempty(findstr('.mat',train_path)),
+ ap=load(train_path); app=fieldnames(ap); train_data=eval(['ap.', app{1,1}]);
+ clear ap app;
+ elseif ~isempty(findstr('.txt',train_path)),
+ train_data=load(train_path, '-ascii');
+ else
+ error('Invalid data format: not a .mat or a .txt file')
+ end
+ if (size(train_data,2)~=cov_dim+res_dim)&(type==1),
+ error(['Invalid data matrix size: ', num2str(size(train_data,2)), ' columns rather than ',...
+ num2str(cov_dim+res_dim),'!']);
+ elseif (size(train_data,2)~=cov_dim+1)&(type==2),
+ error(['Invalid data matrix size: ', num2str(size(train_data,2)), ' columns rather than ',...
+ num2str(cov_dim+1),'!']);
+ elseif (~isempty(find(ismember(intersect([train_data(:,end)' 1:res_dim],...
+ train_data(:,end)'),[1:res_dim])==0)))&(type==2),
+ error('Invalid class label');
+ end
+ ntrain=size(train_data,1);
+ train_d=train_data(:,1:cov_dim);
+ if type==2,
+ train_t=zeros(ntrain, res_dim);
+ for m=1:res_dim,
+ train_t((find(train_data(:,end)==m))',m)=1;
+ end
+ else
+ train_t=train_data(:,cov_dim+1:end);
+ end
+ disp(' ')
+ % ------------------------------------------------------------------------------------------------
+ % Loading test data ------------------------------------------------------------------------------
+ % ------------------------------------------------------------------------------------------------
+ disp('(If you don''t want to specify a test-set press ''return'' only)');
+ test_path=input('Insert the complete (with extension) path of the test data file:\n >> ','s');
+ if ~isempty(test_path),
+ if ~isempty(findstr('.mat',test_path)),
+ ap=load(test_path); app=fieldnames(ap); test_data=eval(['ap.', app{1,1}]);
+ clear ap app;
+ elseif ~isempty(findstr('.txt',test_path)),
+ test_data=load(test_path, '-ascii');
+ else
+ error('Invalid data format: not a .mat or a .txt file')
+ end
+ if (size(test_data,2)~=cov_dim)&(size(test_data,2)~=cov_dim+res_dim)&(type==1),
+ error(['Invalid data matrix size: ', num2str(size(test_data,2)), ' columns rather than ',...
+ num2str(cov_dim+res_dim), ' or ', num2str(cov_dim), '!']);
+ elseif (size(test_data,2)~=cov_dim)&(size(test_data,2)~=cov_dim+1)&(type==2),
+ error(['Invalid data matrix size: ', num2str(size(test_data,2)), ' columns rather than ',...
+ num2str(cov_dim+1), ' or ', num2str(cov_dim), '!']);
+ elseif (~isempty(find(ismember(intersect([test_data(:,end)' 1:res_dim],...
+ test_data(:,end)'),[1:res_dim])==0)))&(type==2)&(size(test_data,2)==cov_dim+1),
+ error('Invalid class label');
+ end
+ ntest=size(test_data,1);
+ test_d=test_data(:,1:cov_dim);
+ if (type==2)&(size(test_data,2)>cov_dim),
+ test_t=zeros(ntest, res_dim);
+ for m=1:res_dim,
+ test_t((find(test_data(:,end)==m))',m)=1;
+ end
+ elseif (type==1)&(size(test_data,2)>cov_dim),
+ test_t=test_data(:,cov_dim+1:end);
+ end
+ disp(' ');
+ end
+else
+ clc
+ disp('----------------------------------------------------');
+ disp(' Specify the Input ');
+ disp('----------------------------------------------------');
+ disp(' ')
+ ntrain = input('Insert the number of examples in training (<500): ');
+ if (isempty(ntrain)|(floor(ntrain)~=ntrain)|(ntrain<=0)|(ntrain>500)),
+ error(['Invalid value: ', num2str(ntrain), ' is not a positive integer <500!']);
+ end
+ disp(' ')
+ test_path='toy';
+ ntest = input('Insert the number of examples in test (<500): ');
+ if (isempty(ntest)|(floor(ntest)~=ntest)|(ntest<=0)|(ntest>500)),
+ error(['Invalid value: ', num2str(ntest), ' is not a positive integer <500!']);
+ end
+
+ if type==2,
+ cov_dim=2;
+ res_dim=3;
+ seed = 42;
+ [train_d, ntrain1, ntrain2, train_t]=gen_data(ntrain, seed);
+ for m=1:ntrain
+ q=[]; q = find(train_t(m,:)==1);
+ train_data(m,:)=[train_d(m,:) q];
+ end
+ [test_d, ntest1, ntest2, test_t]=gen_data(ntest);
+ for m=1:ntest
+ q=[]; q = find(test_t(m,:)==1);
+ test_data(m,:)=[test_d(m,:) q];
+ end
+ else
+ cov_dim=1;
+ res_dim=1;
+ global HOME
+ %%%%%WARNING!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ load([HOME '/examples/static/Misc/mixexp_data.txt'], '-ascii');
+ %%%%%WARNING!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ train_data = mixexp_data(1:ntrain, :);
+ train_d=train_data(:,1:cov_dim); train_t=train_data(:,cov_dim+1:end);
+ test_data = mixexp_data(ntrain+1:ntrain+ntest, :);
+ test_d=test_data(:,1:cov_dim);
+ if size(test_data,2)>cov_dim,
+ test_t=test_data(:,cov_dim+1:end);
+ end
+ end
+end
+% Set the nodes dimension-----------------------------------
+if num_glevel>0,
+ nodes_info(2,2:num_glevel+1)=branch_fact;
+end
+nodes_info(2,1)=cov_dim; nodes_info(2,end)=res_dim;
+%-----------------------------------------------------------
+% Prepare the training data for the learning engine---------
+%-----------------------------------------------------------
+cases = cell(size(nodes_info,2), ntrain);
+for m=1:ntrain,
+ cases{1,m}=train_data(m,1:cov_dim)';
+ cases{end,m}=train_data(m,cov_dim+1:end)';
+end
+%-----------------------------------------------------------------------------------------------------
+[bnet onodes]=hme_topobuilder(nodes_info);
+engine = jtree_inf_engine(bnet, onodes);
+clc
+disp('---------------------------------------------------------------------');
+disp(' L E A R N I N G ');
+disp('---------------------------------------------------------------------');
+disp(' ')
+ll = 0;
+for l=1:ntrain
+ scritta=['example number: ', int2str(l),'---------------------------------------------'];
+ disp(scritta);
+ ev = cases(:,l);
+ [engine, loglik] = enter_evidence(engine, ev);
+ ll = ll + loglik;
+end
+disp(' ')
+disp(['Log-likelihood before learning: ', num2str(ll)]);
+disp(' ')
+disp('(Press any key to continue)');
+pause
+%-----------------------------------------------------------
+clc
+disp('---------------------------------------------------------------------');
+disp(' L E A R N I N G ');
+disp('---------------------------------------------------------------------');
+disp(' ')
+max_em_iter=input('Insert the maximum number of the EM algorithm iterations: ');
+if (isempty(max_em_iter)|(floor(max_em_iter)~=max_em_iter)|(max_em_iter<=1)),
+ error(['Invalid value: ', num2str(ntest), ' is not a positive integer >1!']);
+end
+disp(' ')
+disp(['Log-likelihood before learning: ', num2str(ll)]);
+disp(' ')
+
+[bnet2, LL2] = learn_params_em(engine, cases, max_em_iter);
+disp(' ')
+fprintf('HME: loglik before learning %f, after %d iters %f\n', ll, length(LL2), LL2(end));
+disp(' ')
+disp('(Press any key to continue)');
+pause
+%-----------------------------------------------------------------------------------
+% Classification problem: plot data & decision boundaries if the input data size = 2
+% Regression problem: plot data & prediction if the input data size = 1
+%-----------------------------------------------------------------------------------
+if (type==2)&(nodes_info(2,1)==2)&(~isempty(test_path)),
+ fh1=hme_class_plot(bnet2, nodes_info, train_data, test_data);
+ disp(' ');
+ disp('(See the figure)');
+elseif (type==2)&(nodes_info(2,1)==2)&(isempty(test_path)),
+ fh1=hme_class_plot(bnet2, nodes_info, train_data);
+ disp(' ');
+ disp('(See the figure)');
+elseif (type==1)&(nodes_info(2,1)==1)&(~isempty(test_path)),
+ fh1=hme_reg_plot(bnet2, nodes_info, train_data, test_data);
+ disp(' ');
+ disp('(See the figure)');
+elseif (type==1)&(nodes_info(2,1)==1)&(isempty(test_path)),
+ fh1=hme_reg_plot(bnet2, nodes_info, train_data);
+ disp(' ')
+ disp('(See the figure)');
+end
+%-----------------------------------------------------------------------------------
+% Classification problem: plot confusion matrix
+%-----------------------------------------------------------------------------------
+if (type==2)
+ ztrain=fhme(bnet2, nodes_info, train_d, size(train_d,1));
+ [Htrain, trainRate]=confmat(ztrain, train_t); % CM on the training set
+ fh2=figure('Name','Confusion matrix', 'MenuBar', 'none', 'NumberTitle', 'off');
+ if (~isempty(test_path))&(size(test_data,2)>cov_dim),
+ ztest=fhme(bnet2, nodes_info, test_d, size(test_d,1));
+ [Htest, testRate]=confmat(ztest, test_t); % CM on the test set
+ subplot(1,2,1);
+ end
+ plotmat(Htrain,'b','k',12)
+ tick=[0.5:1:(0.5+nodes_info(2,end)-1)];
+ set(gca,'XTick',tick)
+ set(gca,'YTick',tick)
+ grid('off')
+ ylabel('True')
+ xlabel('Prediction')
+ title(['Confusion Matrix: training set (' num2str(trainRate(1)) '%)'])
+ if (~isempty(test_path))&(size(test_data,2)>cov_dim),
+ subplot(1,2,2)
+ plotmat(Htest,'b','k',12)
+ set(gca,'XTick',tick)
+ set(gca,'YTick',tick)
+ grid('off')
+ ylabel('True')
+ xlabel('Prediction')
+ title(['Confusion Matrix: test set (' num2str(testRate(1)) '%)'])
+ end
+ disp(' ')
+ disp('(Press any key to continue)');
+ pause
+end
+%-----------------------------------------------------------------------------------
+% Regression & Classification problem: calculate the predictions & plot the LL trace
+%-----------------------------------------------------------------------------------
+train_result=fhme(bnet2,nodes_info,train_d,size(train_d,1));
+if ~isempty(test_path),
+ test_result=fhme(bnet2,nodes_info,test_d,size(test_d,1));
+end
+fh3=figure('Name','Log-likelihood trace', 'MenuBar', 'none', 'NumberTitle', 'off')
+plot(LL2,'-ro',...
+ 'MarkerEdgeColor','k',...
+ 'MarkerFaceColor',[1 1 0],...
+ 'MarkerSize',4)
+title('Log-likelihood trace')
+%-----------------------------------------------------------------------------------
+% Regression & Classification problem: save the predictions
+%-----------------------------------------------------------------------------------
+clc
+disp('------------------------------------------------------------------');
+disp(' Save the results ');
+disp('------------------------------------------------------------------');
+disp(' ')
+%-----------------------------------------------------------------------------------
+save_quest_m=input('Do you want to save the HME model (Y/N)? [Y default]: ', 's');
+if isempty(save_quest_m),
+ save_quest_m='Y';
+end
+if ~findstr(save_quest_m, ['Y', 'N']), error('Invalid input'); end
+if save_quest_m=='Y',
+ disp(' ');
+ m_save=input('Insert the complete path for save the HME model (.mat):\n >> ', 's');
+ if isempty(m_save), error('You must specify a path!'); end
+ save(m_save, 'bnet2');
+end
+%-----------------------------------------------------------------------------------
+disp(' ')
+save_quest=input('Do you want to save the HME predictions (Y/N)? [Y default]: ', 's');
+disp(' ')
+if isempty(save_quest),
+ save_quest='Y';
+end
+if ~findstr(save_quest, ['Y', 'N']), error('Invalid input'); end
+if save_quest=='Y',
+ tr_save=input('Insert the complete path for save the training data prediction (.mat):\n >> ', 's');
+ if isempty(tr_save), error('You must specify a path!'); end
+ save(tr_save, 'train_result');
+ if ~isempty(test_path),
+ disp(' ')
+ te_save=input('Insert the complete path for save the test data prediction (.mat):\n >> ', 's');
+ if isempty(te_save), error('You must specify a path!'); end
+ save(te_save, 'test_result');
+ end
+end
+clc
+disp('----------------------------------------------------');
+disp(' B Y E ! ');
+disp('----------------------------------------------------');
+pause(2)
+%clear
+clc
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/test_data_class.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/test_data_class.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/test_data_class2.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/test_data_class2.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/test_data_reg.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/test_data_reg.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/train_data_class.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/train_data_class.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/train_data_reg.mat
Binary file toolboxes/FullBNT-1.0.7/bnt/examples/static/HME/train_data_reg.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/mixexp_data.txt/1.1.1.1/Wed May 29 15:59:54 2002//
+/mixexp_graddesc.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mixexp_plot.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sprinkler.bif/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/Misc
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/mixexp_data.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/mixexp_data.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1000 @@
+ 0.68 0.65
+ -0.31 -0.24
+ 0.85 0.94
+ -0.34 -0.31
+ -0.58 0.62
+ -0.04 -0.11
+ -0.82 0.61
+ -0.38 0.16
+ 0.71 0.75
+ 0.80 0.95
+ 0.66 0.64
+ -0.11 -0.03
+ -0.16 0.11
+ -0.47 0.75
+ 0.09 0.43
+ 0.04 -0.36
+ -0.80 0.77
+ -0.63 0.79
+ -0.13 0.46
+ 0.49 0.78
+ 0.93 1.12
+ 0.08 -0.35
+ 0.27 -0.19
+ -0.66 0.56
+ 0.98 1.02
+ 0.13 -0.16
+ -0.21 0.10
+ 0.41 0.29
+ 0.18 0.34
+ -0.46 0.21
+ 0.03 -0.24
+ 0.96 1.42
+ 0.96 1.45
+ 0.93 1.12
+ 0.08 -0.21
+ -0.49 0.39
+ 0.66 0.21
+ -0.70 0.45
+ -0.81 0.57
+ -0.17 0.10
+ -0.22 0.03
+ 0.24 0.32
+ -0.07 0.31
+ -0.47 0.58
+ 0.41 0.28
+ -0.16 0.03
+ 0.97 0.79
+ -0.07 -0.09
+ 0.22 0.32
+ -0.71 0.73
+ 0.51 0.22
+ 0.21 0.57
+ 0.64 0.58
+ -0.65 0.74
+ 0.05 0.55
+ 0.41 0.67
+ -0.96 1.36
+ -0.35 0.03
+ -0.38 -0.15
+ 0.61 0.71
+ -0.93 0.75
+ 0.98 0.99
+ 0.58 0.61
+ 0.53 0.62
+ -0.24 -0.04
+ -0.84 1.07
+ -0.35 0.42
+ -0.12 0.15
+ -0.38 0.18
+ -0.87 1.15
+ 0.38 0.38
+ -0.58 0.57
+ 0.42 0.48
+ 0.64 0.59
+ -0.52 0.54
+ 0.56 0.54
+ -0.22 0.21
+ -0.64 0.94
+ 0.92 0.99
+ 0.73 0.31
+ -0.04 0.39
+ -0.42 0.40
+ -0.73 1.08
+ -0.67 0.51
+ -0.51 -0.11
+ -0.71 0.81
+ -0.44 0.32
+ 0.34 0.15
+ 0.45 0.27
+ 0.83 0.57
+ -0.91 1.07
+ -0.27 0.46
+ -0.51 0.23
+ -0.90 0.66
+ 0.09 0.03
+ 0.05 -0.16
+ -0.87 0.45
+ -0.43 0.61
+ -0.00 0.16
+ 0.48 0.49
+ -0.84 1.06
+ -0.45 0.34
+ -0.25 0.30
+ -0.91 1.03
+ -0.82 0.88
+ -0.45 0.23
+ 0.77 0.59
+ -0.40 0.30
+ 0.45 0.28
+ -0.56 0.17
+ -0.81 1.00
+ 0.68 0.81
+ 0.18 0.03
+ 0.64 1.01
+ 0.63 0.61
+ -0.16 0.48
+ 0.66 0.92
+ -0.22 0.14
+ -0.49 0.36
+ -0.66 0.92
+ 0.77 0.91
+ -0.10 -0.28
+ -0.42 0.22
+ 0.96 1.23
+ 0.85 0.72
+ 0.87 0.71
+ 0.86 0.85
+ -0.53 0.72
+ -0.60 0.54
+ 0.43 0.65
+ -0.37 0.29
+ -0.84 0.72
+ -0.46 0.35
+ 0.11 -0.39
+ 0.72 0.97
+ 0.92 0.89
+ -0.32 0.29
+ 0.27 0.05
+ 0.45 0.44
+ -0.25 0.34
+ -0.45 1.16
+ -0.52 0.85
+ -0.78 0.90
+ 0.03 -0.11
+ -0.88 0.33
+ -0.56 1.02
+ 0.83 0.54
+ -0.01 0.14
+ 0.86 1.05
+ -0.53 0.21
+ -0.60 0.10
+ 0.89 1.15
+ 0.65 0.61
+ -0.04 0.29
+ 0.72 0.51
+ -0.32 0.40
+ -0.75 0.69
+ 0.47 0.48
+ 0.49 0.69
+ 0.55 0.59
+ -0.20 -0.15
+ -0.52 0.36
+ -0.61 1.07
+ -0.02 -0.02
+ -0.67 0.63
+ 0.42 0.46
+ -0.20 0.66
+ 0.84 0.27
+ 0.05 -0.12
+ 0.96 0.51
+ -0.71 0.72
+ 0.04 -0.16
+ -0.68 0.91
+ -0.38 0.26
+ 0.11 0.18
+ 0.40 0.32
+ -0.75 0.59
+ -0.43 0.54
+ 0.96 1.53
+ -0.20 0.31
+ -0.14 -0.48
+ 0.84 0.50
+ 0.12 -0.28
+ 0.66 1.07
+ -0.15 0.19
+ 0.28 0.48
+ 0.78 0.54
+ -0.75 0.73
+ 0.37 0.26
+ -0.67 0.39
+ -0.71 0.31
+ -0.96 0.82
+ 0.68 0.75
+ -0.36 0.56
+ 0.59 0.44
+ 0.68 0.23
+ 0.10 -0.04
+ 0.13 0.43
+ 0.28 0.52
+ -0.83 0.55
+ -0.41 0.38
+ -0.21 -0.00
+ 0.08 0.23
+ 0.04 -0.02
+ -0.11 0.14
+ 0.40 0.34
+ 0.22 0.43
+ 0.30 0.19
+ -0.85 0.83
+ 0.13 -0.12
+ -0.55 0.49
+ 0.15 0.09
+ 0.48 0.15
+ -0.19 0.24
+ -0.39 -0.11
+ -0.79 0.50
+ -0.53 0.73
+ 0.07 -0.17
+ 0.51 0.75
+ 0.84 0.78
+ 0.70 0.39
+ 0.94 0.55
+ -0.73 1.00
+ 0.95 1.10
+ -0.44 0.55
+ -0.50 0.54
+ -0.34 0.00
+ 0.31 0.61
+ 0.02 0.14
+ 0.85 0.57
+ 0.74 0.58
+ -0.05 -0.14
+ -0.94 0.80
+ -0.59 0.18
+ 0.28 -0.32
+ 0.98 0.88
+ 0.64 0.20
+ -0.20 0.13
+ 0.58 1.10
+ 0.08 -0.05
+ -0.39 0.66
+ 0.22 0.36
+ 0.07 -0.08
+ -0.87 1.11
+ -0.05 0.14
+ 0.91 0.91
+ 0.87 1.19
+ -0.99 1.14
+ 0.55 1.30
+ -0.90 0.81
+ 0.49 0.42
+ 0.11 0.30
+ -0.90 0.64
+ -0.38 0.52
+ 0.38 0.61
+ -0.67 0.58
+ -0.05 0.25
+ 0.36 0.85
+ 0.84 0.51
+ 0.04 0.08
+ 0.05 0.08
+ -0.20 0.22
+ -0.98 0.74
+ 0.14 0.29
+ 0.93 1.22
+ 0.19 0.41
+ -0.85 1.16
+ 0.44 0.84
+ 0.34 -0.04
+ -0.33 0.16
+ 0.90 0.73
+ 0.58 0.39
+ -0.97 0.70
+ -0.18 0.09
+ -1.00 0.83
+ -0.75 1.07
+ 0.98 1.01
+ -0.94 1.11
+ 0.46 1.00
+ 0.64 0.38
+ -0.21 -0.10
+ -0.18 0.04
+ -0.39 0.61
+ -0.85 0.46
+ -0.94 1.08
+ 0.49 0.26
+ -0.94 0.87
+ 0.08 0.19
+ -0.05 0.45
+ -0.98 0.94
+ 0.75 0.86
+ 0.67 0.83
+ 0.62 0.21
+ -0.29 -0.33
+ 0.64 0.42
+ -0.32 0.48
+ 0.39 0.34
+ -0.88 1.29
+ -0.03 -0.23
+ 0.77 0.68
+ 0.15 0.18
+ -0.99 1.20
+ -0.38 0.25
+ -0.58 0.45
+ 0.08 0.00
+ 0.30 0.29
+ -0.78 0.61
+ 0.85 0.55
+ -0.79 0.57
+ 0.75 0.40
+ -0.13 -0.03
+ -0.34 -0.00
+ -0.27 -0.03
+ 0.76 1.15
+ 0.12 0.66
+ 0.91 1.48
+ 0.10 -0.07
+ -0.17 -0.18
+ -0.65 0.98
+ -0.54 0.18
+ 0.02 0.07
+ 0.15 0.43
+ -0.82 0.94
+ 0.73 0.40
+ -0.47 0.41
+ -0.32 0.59
+ -0.33 0.41
+ 0.66 0.70
+ 0.88 0.79
+ 0.24 0.29
+ -0.89 1.25
+ 0.83 0.84
+ 0.35 0.74
+ 0.16 0.32
+ -0.70 0.83
+ 0.71 0.58
+ 0.17 -0.38
+ -0.83 0.77
+ 0.47 0.47
+ 0.28 0.51
+ 0.18 0.54
+ -0.86 0.79
+ -0.36 -0.00
+ 0.22 -0.04
+ -0.07 0.57
+ 0.49 0.31
+ -0.96 0.58
+ -0.85 0.80
+ 0.92 0.93
+ -0.96 0.94
+ 0.19 -0.39
+ -0.33 0.07
+ -0.71 0.67
+ 0.30 -0.22
+ -0.29 0.67
+ -0.18 -0.17
+ 0.68 0.52
+ 0.57 0.39
+ -0.17 0.41
+ -0.08 0.06
+ 0.89 0.86
+ 0.99 0.88
+ -0.69 0.71
+ 0.09 0.42
+ -0.95 1.04
+ 0.71 0.57
+ 0.67 0.95
+ 0.24 0.13
+ -0.34 0.22
+ -0.00 -0.00
+ -0.05 0.71
+ 0.93 1.59
+ -0.29 0.32
+ 0.27 0.50
+ -0.90 0.91
+ 0.17 0.17
+ 0.23 0.16
+ 0.19 -0.16
+ 0.93 0.50
+ 0.11 -0.39
+ 0.96 1.11
+ 0.64 1.04
+ -0.75 0.89
+ -0.55 0.11
+ 0.20 0.12
+ -0.10 -0.50
+ 0.40 0.75
+ 0.91 0.84
+ -0.60 0.31
+ 0.20 0.38
+ -0.85 0.87
+ 0.91 1.19
+ 0.27 0.42
+ 0.18 -0.01
+ -0.47 0.33
+ 0.02 -0.30
+ 0.84 1.08
+ 0.75 1.10
+ 0.06 -0.20
+ -0.32 0.31
+ -0.03 -0.19
+ 0.40 0.07
+ -0.33 0.20
+ 0.50 0.81
+ 0.57 1.11
+ -0.70 0.96
+ 0.09 -0.01
+ -0.99 0.86
+ 0.67 0.68
+ 0.15 0.04
+ 0.29 0.58
+ 0.03 -0.25
+ -0.59 0.75
+ -0.98 0.48
+ -0.33 0.25
+ -0.10 0.51
+ -0.78 0.67
+ -0.89 0.84
+ -0.23 0.13
+ -0.55 0.20
+ 0.61 0.79
+ -0.34 0.31
+ 0.90 0.79
+ 0.15 0.60
+ -0.48 0.04
+ -0.89 1.15
+ -0.92 1.34
+ 0.91 0.87
+ -0.34 0.12
+ 0.08 0.51
+ 0.82 0.58
+ -0.66 0.72
+ 0.02 -0.23
+ -0.05 0.51
+ 0.39 0.44
+ -0.64 0.79
+ 0.22 -0.30
+ -0.13 0.56
+ -0.03 0.14
+ 0.67 0.68
+ -0.02 0.05
+ -0.35 0.22
+ -0.50 0.57
+ -0.71 0.63
+ 0.70 0.73
+ -0.52 1.35
+ 0.51 0.38
+ -0.67 0.48
+ -0.34 0.56
+ -0.45 0.32
+ 0.82 0.73
+ -0.32 -0.31
+ -0.06 0.23
+ -0.33 0.00
+ -0.28 0.39
+ 0.98 1.13
+ 0.11 0.08
+ -0.01 -0.05
+ 0.36 0.13
+ -0.35 0.47
+ 0.84 1.12
+ -0.45 0.14
+ -0.48 0.03
+ 0.01 -0.27
+ 0.04 -0.14
+ 0.37 0.78
+ 0.58 -0.14
+ 0.56 0.50
+ 0.71 0.63
+ -0.79 0.64
+ -0.35 0.16
+ 0.16 -0.10
+ 0.73 1.05
+ -0.11 0.23
+ -0.76 0.56
+ 0.88 0.71
+ 0.43 0.26
+ -0.33 0.51
+ -0.55 0.53
+ -0.17 0.23
+ -0.16 -0.32
+ 0.53 0.29
+ 0.99 1.28
+ 0.30 0.24
+ -0.29 0.54
+ 0.23 0.34
+ 0.19 -0.00
+ 0.71 1.04
+ -0.35 0.39
+ 0.54 0.73
+ -0.25 0.03
+ -0.16 0.12
+ 0.43 0.51
+ 0.07 0.12
+ 0.58 -0.60
+ -0.59 0.36
+ -0.68 0.89
+ -0.24 0.08
+ 0.52 0.36
+ 0.23 0.14
+ 0.26 0.48
+ 0.50 0.68
+ 1.00 0.86
+ 0.25 -0.21
+ -0.37 0.24
+ 0.13 -0.40
+ 0.86 0.78
+ 0.81 1.03
+ 0.52 0.57
+ 0.79 0.79
+ -0.54 0.99
+ 0.19 -0.14
+ 0.09 -0.06
+ -0.73 0.52
+ -0.70 0.35
+ -0.16 0.47
+ -0.10 0.25
+ -0.19 0.08
+ -0.41 -0.05
+ -0.71 1.07
+ 0.19 -0.01
+ 0.94 1.21
+ 0.23 0.05
+ -0.25 0.25
+ -0.03 -0.48
+ -0.10 0.12
+ -0.59 0.73
+ 0.66 1.12
+ 0.36 0.29
+ -0.18 0.17
+ 0.99 1.20
+ 0.22 -0.22
+ -0.82 0.99
+ -0.54 0.93
+ -0.73 0.74
+ 0.34 0.63
+ 0.66 0.75
+ -0.72 0.68
+ 0.73 0.87
+ 0.72 0.60
+ -0.36 0.59
+ -0.01 -0.03
+ -0.20 0.33
+ -0.22 -0.30
+ 0.90 1.11
+ 0.37 0.13
+ -0.92 0.69
+ -0.93 1.00
+ 0.84 1.16
+ -0.70 0.99
+ -0.51 0.50
+ -0.78 1.09
+ -0.65 1.21
+ -0.85 0.68
+ 0.42 0.37
+ 0.65 0.37
+ -0.25 0.13
+ -0.61 0.40
+ -0.54 0.29
+ 0.98 1.38
+ -0.16 0.43
+ 0.66 0.41
+ -0.27 0.25
+ 0.40 0.76
+ 0.97 1.05
+ 0.81 1.14
+ -0.67 0.26
+ -0.39 0.49
+ 0.57 0.71
+ 0.84 0.65
+ 0.42 0.81
+ -0.73 0.85
+ 0.54 -0.01
+ 0.82 0.47
+ -0.20 0.51
+ 0.15 0.16
+ 0.79 1.09
+ -0.22 0.19
+ -0.78 0.89
+ -0.51 0.16
+ 0.69 0.56
+ -0.87 0.79
+ -0.95 1.23
+ 0.02 -0.14
+ -0.47 0.50
+ 0.82 0.83
+ -0.25 0.08
+ -0.14 0.42
+ 0.20 0.55
+ 0.97 1.04
+ 0.30 0.44
+ -0.66 0.85
+ -0.02 -0.34
+ 0.02 0.34
+ 0.27 -0.23
+ 0.51 0.26
+ -0.00 -0.15
+ 0.70 0.80
+ -0.74 0.83
+ 0.05 -0.09
+ -0.97 0.64
+ 0.98 1.12
+ 0.92 0.76
+ -0.43 0.58
+ 0.74 0.87
+ 0.42 0.74
+ -0.00 -0.27
+ -0.78 0.59
+ -0.15 0.24
+ 0.19 -0.09
+ -0.57 0.40
+ -0.45 0.43
+ -0.46 0.39
+ -0.63 0.83
+ -0.50 0.32
+ 0.46 0.59
+ -0.96 0.81
+ -0.69 0.45
+ -0.51 0.93
+ 0.35 0.33
+ -0.17 0.13
+ -0.70 0.54
+ -0.17 0.07
+ 0.32 0.59
+ -0.01 -0.17
+ -0.77 0.55
+ -0.35 0.25
+ -0.19 -0.20
+ -0.08 0.16
+ 0.77 0.60
+ 0.52 1.07
+ 0.41 0.62
+ 0.09 -0.35
+ 0.24 0.74
+ -0.64 0.65
+ 0.96 0.80
+ 0.28 0.09
+ 0.51 0.54
+ -0.79 0.77
+ -0.73 0.96
+ -0.57 0.59
+ -0.75 0.78
+ -0.44 0.13
+ 0.61 0.40
+ -0.24 0.13
+ 0.64 1.05
+ -0.48 0.04
+ -0.87 0.61
+ -0.34 0.23
+ 0.23 0.17
+ -0.67 0.58
+ 0.33 -0.01
+ -0.86 1.48
+ 0.71 0.97
+ 0.16 -0.33
+ -0.49 0.46
+ 0.21 0.66
+ 0.46 0.58
+ 0.99 1.08
+ -0.36 0.49
+ 0.09 -0.12
+ -0.10 -0.35
+ 0.33 0.52
+ -0.97 1.56
+ -0.53 0.17
+ 0.31 0.38
+ 0.71 0.85
+ -0.56 0.81
+ 0.74 0.77
+ 0.48 0.65
+ 0.65 1.18
+ 0.70 0.73
+ -0.97 0.62
+ 0.75 0.32
+ 0.76 0.57
+ 0.32 0.49
+ 0.46 0.62
+ 0.95 1.14
+ 0.84 0.74
+ -0.78 0.37
+ 0.19 0.44
+ 0.39 0.44
+ 0.16 0.04
+ -0.88 1.11
+ -0.62 0.78
+ -0.20 0.06
+ -0.73 0.87
+ 0.49 0.56
+ -0.68 0.67
+ 0.31 -0.22
+ 0.99 0.74
+ 0.50 0.35
+ -0.46 0.75
+ -0.50 0.53
+ -0.02 0.03
+ -0.42 0.23
+ 0.91 0.59
+ -0.12 0.03
+ -0.57 0.73
+ 0.67 0.50
+ -0.52 0.32
+ -0.35 0.15
+ -0.07 0.01
+ -0.02 0.22
+ 0.23 0.29
+ -0.10 -0.44
+ -0.22 0.24
+ -0.34 -0.07
+ 0.43 0.51
+ 0.94 1.08
+ -0.80 1.10
+ 0.31 0.21
+ 0.82 0.78
+ -0.19 0.14
+ -0.07 0.07
+ -0.86 0.83
+ 0.97 0.99
+ -0.18 0.11
+ -0.49 0.18
+ -0.88 0.69
+ -0.13 -0.01
+ -0.29 0.19
+ 0.58 0.37
+ -0.64 0.28
+ -0.49 0.44
+ -0.85 0.72
+ 0.73 0.74
+ 0.43 0.32
+ -0.36 0.44
+ 0.30 0.08
+ -0.58 0.28
+ -0.40 0.40
+ -0.69 0.99
+ -0.54 0.30
+ -0.04 0.03
+ 0.13 0.33
+ 0.10 0.02
+ 0.20 0.42
+ 0.97 0.93
+ -0.98 1.33
+ -0.60 0.05
+ -0.11 -0.23
+ -0.82 0.75
+ -0.35 0.47
+ 0.14 0.11
+ 0.87 1.13
+ -0.34 0.66
+ -0.84 1.18
+ -0.82 1.03
+ -0.53 0.61
+ 0.60 0.40
+ 0.90 1.08
+ -0.56 0.73
+ 0.89 0.47
+ -0.72 0.70
+ 0.15 0.24
+ 0.95 1.05
+ 0.63 0.36
+ -0.76 0.64
+ 0.38 0.53
+ 0.55 0.62
+ 0.42 0.35
+ -0.91 0.88
+ -0.93 0.94
+ -0.64 0.18
+ -0.99 1.08
+ -0.71 1.04
+ 0.64 0.13
+ -0.48 0.51
+ -0.10 -0.10
+ 0.67 0.86
+ -0.83 0.69
+ -0.25 0.18
+ -0.07 0.17
+ 0.13 0.11
+ 0.77 1.05
+ 0.01 0.34
+ -0.12 0.02
+ 0.50 0.62
+ -0.07 -0.19
+ 0.76 1.08
+ -0.68 0.23
+ 0.18 0.01
+ -0.55 1.31
+ 0.68 0.83
+ -0.08 0.12
+ 0.31 0.53
+ 0.35 0.29
+ -0.61 0.51
+ -0.18 0.25
+ 0.50 0.58
+ -0.36 0.33
+ 0.46 -0.02
+ 0.72 0.96
+ -0.56 0.41
+ 0.73 0.96
+ -0.14 -0.15
+ 0.08 0.08
+ 0.76 0.62
+ 0.15 -0.25
+ 0.23 0.13
+ -0.12 0.11
+ 0.12 -0.57
+ -0.24 0.44
+ -0.63 0.67
+ -0.44 0.31
+ 0.84 0.99
+ -0.74 0.56
+ -0.74 0.51
+ 0.25 0.20
+ 0.76 0.88
+ 0.44 0.49
+ 0.32 0.42
+ -0.44 0.87
+ 0.33 0.62
+ -0.76 1.23
+ 0.74 1.40
+ 0.81 0.39
+ -0.40 0.23
+ 0.16 0.15
+ -0.54 0.92
+ -0.44 0.64
+ 0.85 1.25
+ 0.27 0.41
+ -0.94 0.76
+ 0.65 0.56
+ 0.87 0.82
+ -0.04 -0.10
+ -0.43 0.35
+ -0.78 0.77
+ -0.80 0.54
+ -0.04 0.23
+ 0.21 0.30
+ 0.71 0.64
+ 0.51 0.43
+ -0.38 0.33
+ -0.32 0.37
+ 0.77 0.95
+ -0.91 0.89
+ 0.79 0.70
+ -0.94 0.78
+ -0.05 -0.18
+ 0.85 0.98
+ -0.33 0.61
+ -0.51 0.82
+ -0.63 0.47
+ -0.77 0.40
+ -0.56 0.89
+ 0.67 0.68
+ -0.87 1.17
+ -0.25 0.43
+ 0.17 0.44
+ -0.13 0.20
+ -0.01 -0.14
+ 0.87 -0.02
+ 0.22 0.05
+ -0.77 0.75
+ -0.73 0.38
+ 0.68 0.53
+ -0.69 0.55
+ -0.17 0.28
+ -0.42 0.40
+ -0.53 1.08
+ -0.46 0.66
+ 0.89 0.73
+ -0.15 -0.02
+ 0.30 0.44
+ 0.42 0.43
+ 0.68 0.87
+ -0.66 0.84
+ -0.18 -0.03
+ -0.86 0.96
+ -0.93 1.08
+ -0.34 0.05
+ 0.42 0.40
+ -0.36 0.34
+ -0.44 -0.30
+ 0.80 0.66
+ -0.01 0.24
+ -0.40 -0.03
+ -0.47 0.46
+ -0.15 0.07
+ -0.41 -0.54
+ 0.25 -0.16
+ 0.86 0.92
+ 0.35 0.51
+ 0.90 1.17
+ -0.82 0.74
+ -0.90 1.10
+ 0.88 1.01
+ 0.95 0.89
+ 0.01 0.23
+ 0.54 0.70
+ -0.37 0.33
+ -0.12 -0.71
+ -0.47 0.88
+ 0.24 0.47
+ -0.17 0.35
+ 0.68 0.56
+ -0.93 0.82
+ 0.60 0.86
+ 0.49 0.86
+ 0.78 0.81
+ -0.01 0.26
+ -0.96 1.34
+ -0.71 0.96
+ 0.17 0.37
+ -0.94 0.53
+ -0.42 0.77
+ 0.12 -0.05
+ 0.84 0.87
+ -0.57 0.48
+ -0.54 0.51
+ -0.98 0.56
+ -0.07 -0.16
+ -0.49 0.47
+ -0.08 0.28
+ -0.59 0.89
+ 0.32 0.11
+ -0.59 0.63
+ 0.08 0.34
+ 0.24 0.21
+ -0.35 0.52
+ -0.18 -0.07
+ 0.70 0.56
+ -0.05 0.24
+ -0.13 -0.36
+ 0.24 0.14
+ 0.54 0.55
+ 0.17 -0.32
+ -0.36 0.61
+ -0.64 0.70
+ 0.92 1.14
+ 0.35 0.29
+ -0.47 0.70
+ -0.96 0.97
+ 0.79 0.53
+ 0.54 0.32
+ -0.19 0.41
+ -0.41 0.57
+ 0.77 0.79
+ -0.53 0.53
+ -0.90 0.85
+ 0.54 0.38
+ -0.42 0.77
+ 0.02 0.19
+ -0.68 0.34
+ -0.87 0.48
+ -0.37 0.60
+ -0.46 0.47
+ 0.08 -0.01
+ 0.51 0.64
+ -0.53 0.06
+ 0.87 0.82
+ -0.25 0.41
+ -0.46 -0.06
+ 0.33 0.36
+ -0.05 0.01
+ -1.00 0.66
+ -0.42 0.10
+ 0.08 0.53
+ -0.98 0.72
+ 0.24 0.41
+ 0.18 0.36
+ -0.24 0.62
+ 0.56 0.35
+ 0.39 0.18
+ 0.76 0.27
+ 1.00 0.68
+ -0.52 0.39
+ -0.66 0.75
+ 0.53 0.67
+ 0.77 0.62
+ 0.84 0.70
+ 0.07 -0.28
+ -0.67 0.86
+ -0.75 0.92
+ 0.42 0.48
+ -0.32 0.59
+ 0.39 0.43
+ 0.79 1.31
+ 0.34 0.43
+ 0.48 0.86
+ -0.50 0.81
+ 0.94 1.67
+ 0.66 0.02
+ -0.28 0.02
+ 0.89 1.28
+ -0.74 1.16
+ 0.81 0.75
+ 0.96 0.12
+ -0.63 0.53
+ -0.86 1.20
+ 0.61 0.56
+ 0.53 0.95
+ 0.20 0.50
+ -0.07 -0.15
+ 0.28 -0.11
+ -0.23 0.47
+ 0.02 0.29
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/mixexp_graddesc.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/mixexp_graddesc.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+
+%%%%%%%%%%
+
+function [theta, eta] = mixture_of_experts(q, data, num_iter, theta, eta)
+% MIXTURE_OF_EXPERTS Fit a piecewise linear regression model using stochastic gradient descent.
+% [theta, eta] = mixture_of_experts(q, data, num_iter)
+%
+% Inputs:
+% q = number of pieces (experts)
+% data(l,:) = input example l
+%
+% Outputs:
+% theta(i,:) = regression vector for expert i
+% eta(i,:) = softmax (gating) params for expert i
+
+[num_cases dim] = size(data);
+data = [ones(num_cases,1) data]; % prepend with offset
+mu = 0.5; % step size
+sigma = 1; % variance of noise
+
+if nargin < 4
+ theta = 0.1*rand(q, dim);
+ eta = 0.1*rand(q, dim);
+end
+
+for t=1:num_iter
+ for iter=1:num_cases
+ x = data(iter, 1:dim);
+ ystar = data(iter, dim+1); % target
+ % yhat(i) = E[y | Q=i, x] = prediction of i'th expert
+ yhat = theta * x';
+ % gate_prior(i,:) = Pr(Q=i | x)
+ gate_prior = exp(eta * x');
+ gate_prior = gate_prior / sum(gate_prior);
+ % lik(i) = Pr(y | Q=i, x)
+ lik = (1/(sqrt(2*pi)*sigma)) * exp(-(0.5/sigma^2) * ((ystar - yhat) .* (ystar - yhat)));
+ % gate_posterior(i,:) = Pr(Q=i | x, y)
+ gate_posterior = gate_prior .* lik;
+ gate_posterior = gate_posterior / sum(gate_posterior);
+ % Update
+ eta = eta + mu*(gate_posterior - gate_prior)*x;
+ theta = theta + mu*(gate_posterior .* (ystar - yhat))*x;
+ end
+
+ if mod(t,100)==0
+ fprintf(1, 'iter %d\n', t);
+ end
+
+end
+fprintf(1, '\n');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/mixexp_plot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/mixexp_plot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function plot_mixexp(theta, eta, data)
+% PLOT_MIXEXP Plot the results for a piecewise linear regression model
+% plot_mixexp(theta, eta, data)
+%
+% data(l,:) = [x y] for example l
+% theta(i,:) = regression vector for expert i
+% eta(i,:) = softmax (gating) params for expert i
+
+numexp = size(theta, 1);
+
+mn = min(data);
+mx = max(data);
+xa = mn(1):0.01:mx(1);
+x = [ones(length(xa),1) xa'];
+% pr(i,l) = posterior probability of expert i on example l
+pr = exp(eta * x');
+pr = pr ./ (ones(numexp,1) * sum(pr));
+% y(i,l) = prediction of expert i for example l
+y = theta * x';
+% yg(l) = weighted prediction for example l
+yg = sum(y .* pr)';
+
+subplot(3,2,1);
+plot(xa, y(1,:));
+title('expert 1');
+
+subplot(3,2,2);
+plot(xa, y(2,:));
+title('expert 2');
+
+subplot(3,2,3);
+plot(xa, pr(1,:));
+title('gating 1');
+
+subplot(3,2,4);
+plot(xa, pr(2,:));
+title('gating 2');
+
+subplot(3,2,5);
+plot(xa, yg);
+axis([-1 1 -1 2])
+title('prediction');
+
+subplot(3,2,6);
+title('data');
+hold on
+plot(data(:,1), data(:,2), '+');
+hold off
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/sprinkler.bif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Misc/sprinkler.bif Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+network Grass
+ {}
+variable Cloudy
+ { type discrete[2] {false true}; }
+variable Sprinkler
+ { type discrete[2] {false true}; }
+variable Rain
+ { type discrete[2] {false true}; }
+variable WetGrass
+ { type discrete[2] {false true}; }
+probability (Cloudy)
+ { table 0.5 0.5; }
+probability (Sprinkler | Cloudy)
+ { table 0.5 0.9 0.5 0.1; }
+probability (Rain | Cloudy)
+ { table 0.8 0.2 0.2 0.8; }
+probability (WetGrass | Rain Sprinkler)
+ { table 1.0 0.1 0.1 0.01 0.0 0.9 0.9 0.99; }
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+/mk_alarm_bnet.m/1.1.1.1/Sun Nov 3 16:44:14 2002//
+/mk_asia_bnet.m/1.1.1.1/Wed Mar 26 00:06:42 2003//
+/mk_cancer_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_car_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_hmm_bnet.m/1.1.1.1/Thu Jan 15 01:06:12 2004//
+/mk_ideker_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_incinerator_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_markov_chain_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_minimal_qmr_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_qmr_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_vstruct_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/Models
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/mk_hmm_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/Models/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/mk_hmm_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/Old/mk_hmm_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function [bnet, onodes] = mk_hmm_bnet(T, Q, O, cts_obs, param_tying)
+% MK_HMM_BNET Make a (static( bnet to represent a hidden Markov model
+% [bnet, onodes] = mk_hmm_bnet(T, Q, O, cts_obs, param_tying)
+%
+% T = num time slices
+% Q = num hidden states
+% O = size of the observed node (num discrete values or length of vector)
+% cts_obs - 1 means the observed node is a continuous-valued vector, 0 means it's discrete
+% param_tying - 1 means we create 3 CPDs, 0 means we create 1 CPD per node
+
+N = 2*T;
+dag = zeros(N);
+for i=1:T-1
+ dag(i,i+1)=1;
+end
+onodes = T+1:N;
+for i=1:T
+ dag(i, onodes(i)) = 1;
+end
+
+if cts_obs
+ dnodes = 1:T;
+else
+ dnodes = 1:N;
+end
+ns = [Q*ones(1,T) O*ones(1,T)];
+
+if param_tying
+ eclass = [1 2*ones(1,T-1) 3*ones(1,T)];
+else
+ eclass = 1:N;
+end
+
+bnet = mk_bnet(dag, ns, dnodes, eclass);
+
+hnodes = mysetdiff(1:N, onodes);
+if ~param_tying
+ for i=hnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ if cts_obs
+ for i=onodes(:)'
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+ end
+ else
+ for i=onodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ end
+else
+ bnet.CPD{1} = tabular_CPD(bnet, 1);
+ bnet.CPD{2} = tabular_CPD(bnet, 2);
+ if cts_obs
+ bnet.CPD{3} = gaussian_CPD(bnet, 3);
+ else
+ bnet.CPD{3} = tabular_CPD(bnet, 3);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_alarm_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_alarm_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,118 @@
+function bnet = mk_alarm_bnet()
+
+% Written by Qian Diao on 11 Dec 01
+
+N = 37;
+dag = zeros(N,N);
+dag(21,23) = 1 ;
+dag(21,24) = 1 ;
+dag(1,24) = 1 ;
+dag(1,23) = 1 ;
+dag(2,26) = 1 ;
+dag(2,25) = 1 ;
+dag(2,24) = 1 ;
+dag(2,13) = 1 ;
+dag(2,23) = 1 ;
+dag(13,30) = 1 ;
+dag(30,31) = 1 ;
+dag(3,14) = 1 ;
+dag(3,19) = 1 ;
+dag(4,36) = 1 ;
+dag(14,35) = 1 ;
+dag(32,33) = 1 ;
+dag(32,35) = 1 ;
+dag(32,34) = 1 ;
+dag(32,36) = 1 ;
+dag(15,21) = 1 ;
+dag(5,31) = 1 ;
+dag(27,30) = 1 ;
+dag(28,31) = 1 ;
+dag(28,29) = 1 ;
+dag(26,28) = 1 ;
+dag(26,27) = 1 ;
+dag(16,31) = 1 ;
+dag(16,37) = 1 ;
+dag(23,26) = 1 ;
+dag(23,29) = 1 ;
+dag(23,25) = 1 ;
+dag(6,15) = 1 ;
+dag(7,27) = 1 ;
+dag(8,21) = 1 ;
+dag(19,20) = 1 ;
+dag(19,22) = 1 ;
+dag(31,32) = 1 ;
+dag(9,14) = 1 ;
+dag(9,17) = 1 ;
+dag(9,19) = 1 ;
+dag(10,33) = 1 ;
+dag(10,34) = 1 ;
+dag(11,16) = 1 ;
+dag(12,13) = 1 ;
+dag(12,18) = 1 ;
+dag(35,37) = 1 ;
+
+node_sizes = 2*ones(1,N);
+node_sizes(2) = 3;
+node_sizes(6) = 3;
+node_sizes(14) = 3;
+node_sizes(15) = 4;
+node_sizes(16) = 3;
+node_sizes(18) = 3;
+node_sizes(19) = 3;
+node_sizes(20) = 3;
+node_sizes(21) = 4;
+node_sizes(22) = 3;
+node_sizes(23) = 4;
+node_sizes(24) = 4;
+node_sizes(25) = 4;
+node_sizes(26) = 4;
+node_sizes(27) = 3;
+node_sizes(28) = 3;
+node_sizes(29) = 4;
+node_sizes(30) = 3;
+node_sizes(32) = 3;
+node_sizes(33) = 3;
+node_sizes(34) = 3;
+node_sizes(35) = 3;
+node_sizes(36) = 3;
+node_sizes(37) = 3;
+
+bnet = mk_bnet(dag, node_sizes);
+
+bnet.CPD{1} = tabular_CPD(bnet, 1,[0.96 0.04 ]);
+bnet.CPD{2} = tabular_CPD(bnet, 2,[0.92 0.03 0.05 ]);
+bnet.CPD{3} = tabular_CPD(bnet, 3,[0.8 0.2 ]);
+bnet.CPD{4} = tabular_CPD(bnet, 4,[0.95 0.05 ]);
+bnet.CPD{5} = tabular_CPD(bnet, 5,[0.8 0.2 ]);
+bnet.CPD{6} = tabular_CPD(bnet, 6,[0.01 0.98 0.01 ]);
+bnet.CPD{7} = tabular_CPD(bnet, 7,[0.01 0.99 ]);
+bnet.CPD{8} = tabular_CPD(bnet, 8,[0.95 0.05 ]);
+bnet.CPD{9} = tabular_CPD(bnet, 9,[0.95 0.05 ]);
+bnet.CPD{10} = tabular_CPD(bnet, 10,[0.9 0.1 ]);
+bnet.CPD{11} = tabular_CPD(bnet, 11,[0.99 0.01 ]);
+bnet.CPD{12} = tabular_CPD(bnet, 12,[0.99 0.01 ]);
+bnet.CPD{13} = tabular_CPD(bnet, 13,[0.95 0.95 0.05 0.1 0.1 0.01 0.05 0.05 0.95 0.9 0.9 0.99 ]);
+bnet.CPD{14} = tabular_CPD(bnet, 14,[0.05 0.95 0.5 0.98 0.9 0.04 0.49 0.01 0.05 0.01 0.01 0.01 ]);
+bnet.CPD{15} = tabular_CPD(bnet, 15,[0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 ]);
+bnet.CPD{16} = tabular_CPD(bnet, 16,[0.3 0.98 0.4 0.01 0.3 0.01 ]);
+bnet.CPD{17} = tabular_CPD(bnet, 17,[0.99 0.1 0.01 0.9 ]);
+bnet.CPD{18} = tabular_CPD(bnet, 18,[0.05 0.01 0.9 0.19 0.05 0.8 ]);
+bnet.CPD{19} = tabular_CPD(bnet, 19,[0.05 0.98 0.01 0.95 0.9 0.01 0.09 0.04 0.05 0.01 0.9 0.01 ]);
+bnet.CPD{20} = tabular_CPD(bnet, 20,[0.95 0.04 0.01 0.04 0.95 0.29 0.01 0.01 0.7 ]);
+bnet.CPD{21} = tabular_CPD(bnet, 21,[0.97 0.97 0.01 0.97 0.01 0.97 0.01 0.97 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 ]);
+bnet.CPD{22} = tabular_CPD(bnet, 22,[0.95 0.04 0.01 0.04 0.95 0.04 0.01 0.01 0.95 ]);
+bnet.CPD{23} = tabular_CPD(bnet, 23,[0.97 0.97 0.97 0.97 0.97 0.97 0.01 0.95 0.97 0.97 0.01 0.95 0.01 0.4 0.97 0.97 0.01 0.5 0.01 0.3 0.97 0.97 0.01 0.3 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.03 0.01 0.01 0.97 0.03 0.01 0.58 0.01 0.01 0.01 0.48 0.01 0.68 0.01 0.01 0.01 0.68 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 ]);
+bnet.CPD{24} = tabular_CPD(bnet, 24,[0.97 0.97 0.97 0.97 0.97 0.97 0.01 0.01 0.4 0.1 0.01 0.01 0.01 0.01 0.2 0.05 0.01 0.01 0.01 0.01 0.2 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.49 0.58 0.84 0.9 0.29 0.01 0.01 0.75 0.25 0.01 0.01 0.01 0.01 0.7 0.15 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.3 0.01 0.05 0.08 0.3 0.97 0.08 0.04 0.25 0.38 0.08 0.01 0.01 0.09 0.25 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.2 0.01 0.01 0.01 0.4 0.01 0.9 0.01 0.45 0.6 0.9 0.97 0.97 0.01 0.59 0.97 0.97 ]);
+bnet.CPD{25} = tabular_CPD(bnet, 25,[0.97 0.97 0.97 0.01 0.6 0.01 0.01 0.5 0.01 0.01 0.5 0.01 0.01 0.01 0.01 0.97 0.38 0.97 0.01 0.48 0.01 0.01 0.48 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.01 0.97 ]);
+bnet.CPD{26} = tabular_CPD(bnet, 26,[0.97 0.97 0.97 0.01 0.01 0.03 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.95 0.01 0.01 0.94 0.01 0.01 0.88 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.04 0.01 0.01 0.1 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.01 ]);
+bnet.CPD{27} = tabular_CPD(bnet, 27,[0.98 0.98 0.98 0.98 0.95 0.01 0.95 0.01 0.01 0.01 0.01 0.01 0.04 0.95 0.04 0.01 0.01 0.01 0.01 0.01 0.01 0.04 0.01 0.98 ]);
+bnet.CPD{28} = tabular_CPD(bnet, 28,[0.01 0.01 0.04 0.9 0.01 0.01 0.92 0.09 0.98 0.98 0.04 0.01 ]);
+bnet.CPD{29} = tabular_CPD(bnet, 29,[0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.97 0.01 0.01 0.01 0.01 0.97 0.97 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.97 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.97 0.97 0.43 ]);
+bnet.CPD{30} = tabular_CPD(bnet, 30,[0.98 0.98 0.01 0.98 0.01 0.69 0.01 0.01 0.98 0.01 0.01 0.3 0.01 0.01 0.01 0.01 0.98 0.01 ]);
+bnet.CPD{31} = tabular_CPD(bnet, 31,[0.05 0.01 0.05 0.01 0.05 0.01 0.05 0.01 0.05 0.01 0.05 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.1 0.01 0.95 0.01 0.95 0.05 0.1 0.01 0.95 0.01 0.95 0.05 0.1 0.01 0.3 0.01 0.3 0.01 0.95 0.01 0.99 0.05 0.95 0.05 0.95 0.01 0.99 0.05 0.99 0.05 0.3 0.01 0.99 0.01 0.3 0.01 0.95 0.99 0.95 0.99 0.95 0.99 0.95 0.99 0.95 0.99 0.95 0.99 0.99 0.99 0.99 0.99 0.99 0.99 0.9 0.99 0.05 0.99 0.05 0.95 0.9 0.99 0.05 0.99 0.05 0.95 0.9 0.99 0.7 0.99 0.7 0.99 0.05 0.99 0.00999999 0.95 0.05 0.95 0.05 0.99 0.01 0.95 0.01 0.95 0.7 0.99 0.01 0.99 0.7 0.99 ]);
+bnet.CPD{32} = tabular_CPD(bnet, 32,[0.1 0.01 0.89 0.09 0.01 0.9 ]);
+bnet.CPD{33} = tabular_CPD(bnet, 33,[0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 ]);
+bnet.CPD{34} = tabular_CPD(bnet, 34,[0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 0.01 0.33333334 0.01 0.33333334 0.01 0.33333334 0.98 0.33333334 ]);
+bnet.CPD{35} = tabular_CPD(bnet, 35,[0.98 0.95 0.3 0.95 0.04 0.01 0.8 0.01 0.01 0.01 0.04 0.69 0.04 0.95 0.3 0.19 0.04 0.01 0.01 0.01 0.01 0.01 0.01 0.69 0.01 0.95 0.98 ]);
+bnet.CPD{36} = tabular_CPD(bnet, 36,[0.98 0.98 0.01 0.4 0.01 0.3 0.01 0.01 0.98 0.59 0.01 0.4 0.01 0.01 0.01 0.01 0.98 0.3 ]);
+bnet.CPD{37} = tabular_CPD(bnet, 37,[0.98 0.98 0.3 0.98 0.1 0.05 0.9 0.05 0.01 0.01 0.01 0.6 0.01 0.85 0.4 0.09 0.2 0.09 0.01 0.01 0.1 0.01 0.05 0.55 0.01 0.75 0.9 ]);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_asia_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_asia_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+function bnet = mk_asia_bnet(CPD_type, p, arity)
+% MK_ASIA_BNET Make the 'Asia' bayes net.
+%
+% BNET = MK_ASIA_BNET uses the parameters specified on p21 of Cowell et al,
+% "Probabilistic networks and expert systems", Springer Verlag 1999.
+%
+% BNET = MK_ASIA_BNET('cpt', p) uses random parameters drawn from a Dirichlet(p,p,...)
+% distribution. If p << 1, this is nearly deterministic; if p >> 1, this is nearly uniform.
+%
+% BNET = MK_ASIA_BNET('bool') makes each CPT a random boolean function.
+%
+% BNET = MK_ASIA_BNET('gauss') makes each CPT a random linear Gaussian distribution.
+%
+% BNET = MK_ASIA_BNET('orig') is the same as MK_ASIA_BNET.
+%
+% BNET = MK_ASIA_BNET('cpt', p, arity) can specify non-binary nodes.
+
+
+if nargin == 0, CPD_type = 'orig'; end
+if nargin < 3, arity = 2; end
+
+Smoking = 1;
+Bronchitis = 2;
+LungCancer = 3;
+VisitToAsia = 4;
+TB = 5;
+TBorCancer = 6;
+Dys = 7;
+Xray = 8;
+
+n = 8;
+dag = zeros(n);
+dag(Smoking, [Bronchitis LungCancer]) = 1;
+dag(Bronchitis, Dys) = 1;
+dag(LungCancer, TBorCancer) = 1;
+dag(VisitToAsia, TB) = 1;
+dag(TB, TBorCancer) = 1;
+dag(TBorCancer, [Dys Xray]) = 1;
+
+ns = arity*ones(1,n);
+if strcmp(CPD_type, 'gauss')
+ dnodes = [];
+else
+ dnodes = 1:n;
+end
+bnet = mk_bnet(dag, ns, 'discrete', dnodes);
+
+switch CPD_type
+ case 'orig',
+ % true is 2, false is 1
+ bnet.CPD{VisitToAsia} = tabular_CPD(bnet, VisitToAsia, [0.99 0.01]);
+ bnet.CPD{Bronchitis} = tabular_CPD(bnet, Bronchitis, [0.7 0.4 0.3 0.6]);
+ % minka: bug fix
+ bnet.CPD{Dys} = tabular_CPD(bnet, Dys, [0.9 0.2 0.3 0.1 0.1 0.8 0.7 0.9]);
+ bnet.CPD{TBorCancer} = tabular_CPD(bnet, TBorCancer, [1 0 0 0 0 1 1 1]);
+ % minka: bug fix
+ bnet.CPD{LungCancer} = tabular_CPD(bnet, LungCancer, [0.99 0.9 0.01 0.1]);
+ bnet.CPD{Smoking} = tabular_CPD(bnet, Smoking, [0.5 0.5]);
+ bnet.CPD{TB} = tabular_CPD(bnet, TB, [0.99 0.95 0.01 0.05]);
+ bnet.CPD{Xray} = tabular_CPD(bnet, Xray, [0.95 0.02 0.05 0.98]);
+ case 'bool',
+ for i=1:n
+ bnet.CPD{i} = boolean_CPD(bnet, i, 'rnd');
+ end
+ case 'gauss',
+ for i=1:n
+ bnet.CPD{i} = gaussian_CPD(bnet, i, 'cov', 1*eye(ns(i)));
+ end
+ case 'cpt',
+ for i=1:n
+ bnet.CPD{i} = tabular_CPD(bnet, i, p);
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_cancer_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_cancer_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,61 @@
+function bnet = mk_cancer_bnet(CPD_type, p)
+% MK_CANCER_BNET Make the 'Cancer' Bayes net.
+%
+% BNET = MK_CANCER_BNET uses the noisy-or parameters specified in Fig 4a of the UAI98 paper by
+% Friedman, Murphy and Russell, "Learning the Structure of DPNs", p145.
+%
+% BNET = MK_CANCER_BNET('noisyor', p) makes each CPD a noisy-or, with probability p of
+% suppression for each parent; leaks are turned off.
+%
+% BNET = MK_CANCER_BNET('cpt', p) uses random CPT parameters drawn from a Dirichlet(p,p,...)
+% distribution. If p << 1, this is near deterministic; if p >> 1, this is near 1/k.
+% p defaults to 1.0 (uniform distribution).
+%
+% BNET = MK_CANCER_BNET('bool') makes each CPT a random boolean function.
+%
+% In all cases, the root is set to a uniform distribution.
+
+if nargin == 0
+ rnd = 0;
+else
+ rnd = 1;
+end
+
+n = 5;
+dag = zeros(n);
+dag(1,[2 3]) = 1;
+dag(2,4) = 1;
+dag(3,4) = 1;
+dag(4,5) = 1;
+
+ns = 2*ones(1,n);
+bnet = mk_bnet(dag, ns);
+
+if ~rnd
+ bnet.CPD{1} = tabular_CPD(bnet, 1, [0.5 0.5]);
+ bnet.CPD{2} = noisyor_CPD(bnet, 2, 1.0, 1-0.9);
+ bnet.CPD{3} = noisyor_CPD(bnet, 3, 1.0, 1-0.2);
+ bnet.CPD{4} = noisyor_CPD(bnet, 4, 1.0, 1-[0.7 0.6]);
+ bnet.CPD{5} = noisyor_CPD(bnet, 5, 1.0, 1-0.5);
+else
+ switch CPD_type
+ case 'noisyor',
+ for i=1:n
+ ps = parents(dag, i);
+ bnet.CPD{i} = noisyor_CPD(bnet, i, 1.0, p*ones(1,length(ps)));
+ end
+ case 'bool',
+ for i=1:n
+ bnet.CPD{i} = boolean_CPD(bnet, i, 'rnd');
+ end
+ case 'cpt',
+ for i=1:n
+ bnet.CPD{i} = tabular_CPD(bnet, i, p);
+ end
+ otherwise
+ error(['bad CPD type ' CPD_type]);
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_car_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_car_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function bnet = mk_car_bnet()
+% MK_CAR_BNET Make the car trouble-shooter bayes net.
+%
+% This network is from p13 of "Troubleshooting under uncertainty", Heckerman, Breese and
+% Rommelse, Microsoft Research Tech Report 1994.
+
+
+BatteryAge = 1;
+Battery = 2;
+Starter = 3;
+Lights = 4;
+TurnsOver = 5;
+FuelPump = 6;
+FuelLine = 7;
+FuelSubsys =8;
+Fuel = 9;
+Spark = 10;
+Starts = 11;
+Gauge = 12;
+
+n = 12;
+dag = zeros(n);
+dag(1,2) = 1;
+dag(2,[4 5])=1;
+dag(3,5) = 1;
+dag(6,8) = 1;
+dag(7,8) = 1;
+dag(8,11) = 1;
+dag(9,12) = 1;
+dag(10,11) = 1;
+
+arity = 2;
+ns = arity*ones(1,n);
+bnet = mk_bnet(dag, ns);
+for i=1:n
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_hmm_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_hmm_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function bnet = mk_hmm_bnet(T, Q, O, cts_obs, param_tying)
+% MK_HMM_BNET Make a (static) bnet to represent a hidden Markov model
+% bnet = mk_hmm_bnet(T, Q, O, cts_obs, param_tying)
+%
+% T = num time slices
+% Q = num hidden states
+% O = size of the observed node (num discrete values or length of vector)
+% cts_obs - 1 means the observed node is a continuous-valued vector, 0 means it's discrete
+% param_tying - 1 means we create 3 CPDs, 0 means we create 1 CPD per node
+
+N = 2*T;
+dag = zeros(N);
+%hnodes = 1:2:2*T;
+hnodes = 1:T;
+for i=1:T-1
+ dag(hnodes(i), hnodes(i+1))=1;
+end
+%onodes = 2:2:2*T;
+onodes = T+1:2*T;
+for i=1:T
+ dag(hnodes(i), onodes(i)) = 1;
+end
+
+if cts_obs
+ dnodes = hnodes;
+else
+ dnodes = 1:N;
+end
+ns = ones(1,N);
+ns(hnodes) = Q;
+ns(onodes) = O;
+
+if param_tying
+ H1class = 1; Hclass = 2; Oclass = 3;
+ eclass = ones(1,N);
+ eclass(hnodes(2:end)) = Hclass;
+ eclass(hnodes(1)) = H1class;
+ eclass(onodes) = Oclass;
+else
+ eclass = 1:N;
+end
+
+bnet = mk_bnet(dag, ns, 'observed', onodes, 'discrete', dnodes, 'equiv_class', eclass);
+
+hnodes = mysetdiff(1:N, onodes);
+if ~param_tying
+ for i=hnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ if cts_obs
+ for i=onodes(:)'
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+ end
+ else
+ for i=onodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ end
+else
+ bnet.CPD{H1class} = tabular_CPD(bnet, hnodes(1)); % prior
+ bnet.CPD{Hclass} = tabular_CPD(bnet, hnodes(2)); % transition matrix
+ if cts_obs
+ bnet.CPD{Oclass} = gaussian_CPD(bnet, onodes(1));
+ else
+ bnet.CPD{Oclass} = tabular_CPD(bnet, onodes(1));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_ideker_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_ideker_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function bnet = mk_ideker_bnet(CPD_type, p)
+% MK_IDEKER_BNET Make the Bayes net in the PSB'00 paper by Ideker, Thorsson and Karp.
+%
+% BNET = MK_IDEKER_BNET uses the boolean functions specified in the paper
+% "Discovery of regulatory interactions through perturbation: inference and experimental design",
+% Pacific Symp. on Biocomputing, 2000.
+%
+% BNET = MK_IDEKER_BNET('root') uses the above boolean functions, but puts a uniform
+% distribution on the root nodes.
+%
+% BNET = MK_IDEKER_BNET('cpt', p) uses random parameters drawn from a Dirichlet(p,p,...)
+% distribution. If p << 1, this is nearly deterministic; if p >> 1, this is nearly uniform.
+%
+% BNET = MK_IDEKER_BNET('bool') makes each CPT a random boolean function.
+%
+% BNET = MK_IDEKER_BNET('orig') is the same as MK_IDEKER_BNET.
+
+
+if nargin == 0
+ CPD_type = 'orig';
+end
+
+n = 4;
+dag = zeros(n);
+dag(1,3)=1;
+dag(2,[3 4])=1;
+dag(3,4)=1;
+ns = 2*ones(1,n);
+bnet = mk_bnet(dag, ns);
+
+switch CPD_type
+ case 'orig',
+ bnet.CPD{1} = tabular_CPD(bnet, 1, [0 1]);
+ bnet.CPD{2} = tabular_CPD(bnet, 2, [0 1]);
+ bnet.CPD{3} = boolean_CPD(bnet, 3, 'inline', inline('x(1) & x(2)'));
+ bnet.CPD{4} = boolean_CPD(bnet, 4, 'inline', inline('x(1) & ~x(2)'));
+ case 'root',
+ bnet.CPD{1} = tabular_CPD(bnet, 1, [0.5 0.5]);
+ bnet.CPD{2} = tabular_CPD(bnet, 2, [0.5 0.5]);
+ bnet.CPD{3} = boolean_CPD(bnet, 3, 'inline', inline('x(1) & x(2)'));
+ bnet.CPD{4} = boolean_CPD(bnet, 4, 'inline', inline('x(1) & ~x(2)'));
+ case 'bool',
+ for i=1:n
+ bnet.CPD{i} = boolean_CPD(bnet, i, 'rnd');
+ end
+ case 'cpt',
+ for i=1:n
+ bnet.CPD{i} = tabular_CPD(bnet, i, p);
+ end
+ otherwise,
+ error(['unknown type ' CPD_type]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_incinerator_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_incinerator_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,61 @@
+function bnet = mk_incinerator_bnet(ns)
+% MK_INCINERATOR_BNET The waste incinerator emissions example from Cowell et al p145
+% function bnet = mk_incinerator_bnet(ns)
+%
+% If ns is omitted, we use the scalars and binary nodes and the original params.
+% Otherwise, we use random params of the desired size.
+%
+% Lauritzen, "Propagation of Probabilities, Means and Variances in Mixed Graphical Association Models",
+% JASA 87(420): 1098--1108
+% This example is reprinted on p145 of "Probabilistic Networks and Expert Systems",
+% Cowell, Dawid, Lauritzen and Spiegelhalter, 1999, Springer.
+% For a picture, see http://www.cs.berkeley.edu/~murphyk/Bayes/usage.html#cg_model
+
+% node numbers
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+names = {'F', 'W', 'E', 'B', 'C', 'D', 'Min', 'Mout', 'L'};
+n = 9;
+dnodes = [F W B];
+cnodes = mysetdiff(1:n, dnodes);
+
+% node sizes - all cts nodes are scalar, all discrete nodes are binary
+if nargin < 1
+ ns = ones(1, n);
+ ns(dnodes) = 2;
+ rnd = 0;
+else
+ rnd = 1;
+end
+
+% topology (p 1099, fig 1)
+dag = zeros(n);
+dag(F,E)=1;
+dag(W,[E Min D]) = 1;
+dag(E,D)=1;
+dag(B,[C D])=1;
+dag(D,[L Mout])=1;
+dag(Min,Mout)=1;
+
+% params (p 1102)
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'names', names);
+
+if rnd
+ for i=dnodes(:)'
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+ for i=cnodes(:)'
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+ end
+else
+ bnet.CPD{B} = tabular_CPD(bnet, B, 'CPT', [0.85 0.15]); % 1=stable, 2=unstable
+ bnet.CPD{F} = tabular_CPD(bnet, F, 'CPT', [0.95 0.05]); % 1=intact, 2=defect
+ bnet.CPD{W} = tabular_CPD(bnet, W, 'CPT', [2/7 5/7]); % 1=industrial, 2=household
+ bnet.CPD{E} = gaussian_CPD(bnet, E, 'mean', [-3.9 -0.4 -3.2 -0.5], ...
+ 'cov', [0.00002 0.0001 0.00002 0.0001]);
+ bnet.CPD{D} = gaussian_CPD(bnet, D, 'mean', [6.5 6.0 7.5 7.0], ...
+ 'cov', [0.03 0.04 0.1 0.1], 'weights', [1 1 1 1]);
+ bnet.CPD{C} = gaussian_CPD(bnet, C, 'mean', [-2 -1], 'cov', [0.1 0.3]);
+ bnet.CPD{L} = gaussian_CPD(bnet, L, 'mean', 3, 'cov', 0.25, 'weights', -0.5);
+ bnet.CPD{Min} = gaussian_CPD(bnet, Min, 'mean', [0.5 -0.5], 'cov', [0.01 0.005]);
+ bnet.CPD{Mout} = gaussian_CPD(bnet, Mout, 'mean', 0, 'cov', 0.002, 'weights', [1 1]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_markov_chain_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_markov_chain_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function bnet = mk_markov_chain_bnet(N, Q)
+
+dag = zeros(N);
+dag(1,2)=1; dag(2,3)=1;
+ns = Q*ones(1,N);
+bnet = mk_bnet(dag, ns);
+for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_minimal_qmr_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_minimal_qmr_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,82 @@
+function [bnet, vals] = mk_minimal_qmr_bnet(G, inhibit, leak, prior, pos, neg, pos_only)
+% MK_MINIMAL_QMR_BNET Make a QMR model which only contains the observed findings
+% [bnet, vals] = mk_minimal_qmr_bnet(G, inhibit, prior, leak, pos, neg)
+%
+% Input:
+% G(i,j) = 1 iff there is an arc from disease i to finding j
+% inhibit(i,j) = inhibition probability on i->j arc
+% leak(j) = inhibition prob. on leak->j arc
+% prior(i) = prob. disease i is on
+% pos = list of leaves that have positive observations
+% neg = list of leaves that have negative observations
+% pos_only = 1 means only include positively observed leaves in the model - the negative
+% ones are absorbed into the prior terms
+%
+% Output:
+% bnet
+% vals is their value
+
+if pos_only
+ obs = pos;
+else
+ obs = myunion(pos, neg);
+end
+Nfindings = length(obs);
+[Ndiseases maxNfindings] = size(inhibit);
+N = Ndiseases + Nfindings;
+finding_node = Ndiseases+1:N;
+
+% j = finding_node(i) means the i'th finding node is the j'th node in the bnet
+% k = obs(i) means the i'th observed (positive) finding is the k'th finding overall
+% If all findings are observed, and posonly = 0, we have i = obs(i) for all i.
+
+%dag = sparse(N, N);
+dag = zeros(N, N);
+dag(1:Ndiseases, Ndiseases+1:N) = G(:,obs);
+
+ns = 2*ones(1,N);
+bnet = mk_bnet(dag, ns, 'observed', finding_node);
+
+CPT = cell(1, Ndiseases);
+for d=1:Ndiseases
+ CPT{d} = [1-prior(d) prior(d)];
+end
+
+if pos_only
+ % Fold in the negative evidence into the prior
+ for i=1:length(neg)
+ n = neg(i);
+ ps = parents(G,n);
+ for pi=1:length(ps)
+ p = ps(pi);
+ q = inhibit(p,n);
+ CPT{p} = CPT{p} .* [1 q];
+ end
+ % Arbitrarily attach the leak term to the first parent
+ p = ps(1);
+ q = leak(n);
+ CPT{p} = CPT{p} .* [q q];
+ end
+end
+
+for d=1:Ndiseases
+ bnet.CPD{d} = tabular_CPD(bnet, d, CPT{d}');
+end
+
+for i=1:Nfindings
+ fnode = finding_node(i);
+ fid = obs(i);
+ ps = parents(G, fid);
+ bnet.CPD{fnode} = noisyor_CPD(bnet, fnode, leak(fid), inhibit(ps, fid));
+end
+
+obs_nodes = finding_node;
+vals = sparse(1, maxNfindings);
+vals(pos) = 2;
+vals(neg) = 1;
+vals = full(vals(obs));
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_qmr_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_qmr_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function bnet = mk_qmr_bnet(G, inhibit, leak, prior, tabular_findings, onodes)
+% MK_QMR_BNET Make a QMR model
+% bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+%
+% G(i,j) = 1 iff there is an arc from disease i to finding j
+% inhibit(i,j) = inhibition probability on i->j arc
+% leak(j) = inhibition prob. on leak->j arc
+% prior(i) = prob. disease i is on
+% tabular_findings = 1 means multinomial leaves (ignores leak/inhibit params)
+% = 0 means noisy-OR leaves (default = 0)
+
+if nargin < 5, tabular_findings = 0; end
+
+[Ndiseases Nfindings] = size(inhibit);
+N = Ndiseases + Nfindings;
+finding_node = Ndiseases+1:N;
+ns = 2*ones(1,N);
+dag = zeros(N,N);
+dag(1:Ndiseases, finding_node) = G;
+if nargin < 6, onodes = finding_node; end
+bnet = mk_bnet(dag, ns, 'observed', onodes);
+
+for d=1:Ndiseases
+ CPT = [1-prior(d) prior(d)];
+ bnet.CPD{d} = tabular_CPD(bnet, d, CPT');
+end
+
+for i=1:Nfindings
+ fnode = finding_node(i);
+ ps = parents(G, i);
+ if tabular_findings
+ bnet.CPD{fnode} = tabular_CPD(bnet, fnode);
+ else
+ bnet.CPD{fnode} = noisyor_CPD(bnet, fnode, leak(i), inhibit(ps, i));
+ end
+end
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_vstruct_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Models/mk_vstruct_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function oracle = mk_vstruct_bnet()
+% MK_VSTRUCT_BNET Make a simple V-structured 3-node noisy-AND Bayes net
+% oracle = mk_vstruct_bnet()
+
+N = 3;
+dag = zeros(N);
+A = 1; B = 2; C = 3;
+dag(A,C)=1;
+dag(B,C)=1;
+ns = 2*ones(1,N);
+
+oracle = mk_bnet(dag, ns);
+oracle.CPD{1} = tabular_CPD(oracle, 1, [0.5 0.5]);
+oracle.CPD{2} = tabular_CPD(oracle, 2, [0.5 0.5]);
+pnoise = 0.1; % degree of noise
+oracle.CPD{3} = boolean_CPD(oracle, 3, 'named', 'all', pnoise);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/scg1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/scg2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/scg3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/scg_3node.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/scg_unstable.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/SCG
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,77 @@
+% Same as cg1, except we call stab_cond_gauss_inf_engine
+
+bnet = mk_incinerator_bnet;
+
+engines = {};
+engines{end+1} = stab_cond_gauss_inf_engine(bnet);
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = cond_gauss_inf_engine(bnet);
+nengines = length(engines);
+
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+dnodes = [B F W];
+cnodes = mysetdiff(1:n, dnodes);
+
+evidence = cell(1,n); % no evidence
+ll = zeros(1, nengines);
+for e=1:nengines
+ [engines{e}, ll(e)] = enter_evidence(engines{e}, evidence);
+end
+%assert(approxeq(ll(1), ll)))
+ll
+
+% Compare to the results in table on p1107.
+% These results are printed to 3dp in Cowell p150
+
+mu = zeros(1,n);
+sigma = zeros(1,n);
+dprob = zeros(1,n);
+addev = 1;
+tol = 1e-2;
+for e=1:nengines
+ for i=cnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ mu(i) = m.mu;
+ sigma(i) = sqrt(m.Sigma);
+ end
+ for i=dnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ dprob(i) = m.T(1);
+ end
+ assert(approxeq(mu([E D C L Min Mout]), [-3.25 3.04 -1.85 1.48 -0.214 2.83], tol))
+ assert(approxeq(sigma([E D C L Min Mout]), [0.709 0.770 0.507 0.631 0.459 0.860], tol))
+ assert(approxeq(dprob([B F W]), [0.85 0.95 0.29], tol))
+ %m = marginal_nodes(engines{e}, bnet.names('E'), addev);
+ %assert(approxeq(m.mu, -3.25, tol))
+ %assert(approxeq(sqrt(m.Sigma), 0.709, tol))
+end
+
+% Add evidence (p 1105, top right)
+evidence = cell(1,n);
+evidence{W} = 1; % industrial
+evidence{L} = 1.1;
+evidence{C} = -0.9;
+
+ll = zeros(1, nengines);
+for e=1:nengines
+ [engines{e}, ll(e)] = enter_evidence(engines{e}, evidence);
+end
+%assert(all(approxeq(ll(1), ll)))
+ll
+
+for e=1:nengines
+ for i=cnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ mu(i) = m.mu;
+ sigma(i) = sqrt(m.Sigma);
+ end
+ for i=dnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ dprob(i) = m.T(1);
+ end
+ assert(approxeq(mu([E D C L Min Mout]), [-3.90 3.61 -0.9 1.1 0.5 4.11], tol))
+ assert(approxeq(sigma([E D C L Min Mout]), [0.076 0.326 0 0 0.1 0.344], tol))
+ assert(approxeq(dprob([B F W]), [0.0122 0.9995 1], tol))
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+% Same as cg2, except we call stab_cond_gauss_inf_engine
+
+ns = 2*ones(1,9);
+bnet = mk_incinerator_bnet(ns);
+
+engines = {};
+engines{end+1} = stab_cond_gauss_inf_engine(bnet);
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = cond_gauss_inf_engine(bnet);
+nengines = length(engines);
+
+[err, time] = cmp_inference_static(bnet, engines, 'singletons_only', 1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+% Compare various inference engines on the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+gauss = 1;
+if gauss
+ ns = ones(1,N); % scalar nodes
+ ns(1) = 2;
+ ns(9) = 3;
+ dnodes = [];
+else
+ ns = 2*ones(1,N); % binary nodes
+ dnodes = 1:N;
+end
+
+bnet = mk_bnet(dag, ns, 'discrete', dnodes);
+% use random params
+for i=1:N
+ if gauss
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+ else
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+ end
+end
+
+engines = {};
+engines{1} = jtree_inf_engine(bnet);
+engines{2} = stab_cond_gauss_inf_engine(bnet);
+
+[err, time] = cmp_inference_static(bnet, engines);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg_3node.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg_3node.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+% This example is from Page.143 of "Probabilistic Networks and Expert Systems",
+% Cowell, Dawid, Lauritzen and Spiegelhalter, 1999, Springer.
+
+X = 1; Y = 2; Z = 3;
+n = 3;
+
+dag = zeros(n);
+dag(X, Y)=1;
+dag(Y, Z)=1;
+
+ns = ones(1, n);
+dnodes = [];
+
+bnet = mk_bnet(dag, ns, dnodes);
+bnet.CPD{X} = gaussian_CPD(bnet, X, 'mean', 0, 'cov', 1);
+bnet.CPD{Y} = gaussian_CPD(bnet, Y, 'mean', 0, 'cov', 1, 'weights', 1);
+bnet.CPD{Z} = gaussian_CPD(bnet, Z, 'mean', 0, 'cov', 1, 'weights', 1);
+
+engines = {};
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = stab_cond_gauss_inf_engine(bnet);
+nengines = length(engines);
+
+evidence = cell(1,n);
+evidence{Y} = 1.5;
+
+for e=1:nengines
+ engines{e} = enter_evidence(engines{e}, evidence);
+ margX = marginal_nodes(engines{e}, X);
+ assert(approxeq(margX.mu, 0.75))
+ assert(approxeq(margX.Sigma, 0.5))
+
+ margZ = marginal_nodes(engines{e}, Z);
+ assert(approxeq(margZ.mu, 1.5))
+ assert(approxeq(margZ.Sigma, 1))
+end
+
+
+evidence = cell(1,n);
+evidence{Z} = 1.5;
+
+for e=1:nengines
+ engines{e} = enter_evidence(engines{e}, evidence);
+ margX = marginal_nodes(engines{e}, X);
+ assert(approxeq(margX.mu, 1/2))
+ assert(approxeq(margX.Sigma, 2/3))
+
+ margY = marginal_nodes(engines{e}, Y);
+ assert(approxeq(margY.mu, 1))
+ assert(approxeq(margY.Sigma, 2/3))
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg_unstable.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/SCG/scg_unstable.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,91 @@
+function scg_unstable()
+
+% the objective of this script is to test if the stable conditional Gaussian
+% inference can handle the numerical instability problem described on
+% page 151 of 'Probabilistic Networks and Expert Systems' by Cowell, Dawid, Lauritzen and
+% Spiegelhalter, 1999.
+
+A = 1; Y = 2;
+n = 2;
+
+ns = ones(1, n);
+dnodes = [A];
+cnodes = Y;
+ns = [2 1];
+
+dag = zeros(n);
+dag(A, Y) = 1;
+
+bnet = mk_bnet(dag, ns, dnodes);
+
+bnet.CPD{A} = tabular_CPD(bnet, A, [0.5 0.5]');
+bnet.CPD{Y} = gaussian_CPD(bnet, Y, 'mean', [0 1], 'cov', [1e-5 1e-6]);
+
+evidence = cell(1, n);
+
+pot_type = 'cg';
+potYgivenA = convert_to_pot(bnet.CPD{Y}, pot_type, [A Y], evidence);
+potA = convert_to_pot(bnet.CPD{A}, pot_type, A, evidence);
+potYandA = multiply_by_pot(potYgivenA, potA);
+potA2 = marginalize_pot(potYandA, A);
+
+thresh = 1; % 0dp
+
+[g,h,K] = extract_can(potA);
+assert(approxeq(g(:)', [-0.693147 -0.693147], thresh))
+
+
+[g,h,K] = extract_can(potYgivenA);
+assert(approxeq(g(:)', [4.83752 -499994], thresh))
+assert(approxeq(h(:)', [0 1e6]))
+assert(approxeq(K(:)', [1e5 1e6]))
+
+[g,h,K] = extract_can(potYandA);
+assert(approxeq(g(:)', [4.14437 -499995], thresh))
+assert(approxeq(h(:)', [0 1e6]))
+assert(approxeq(K(:)', [1e5 1e6]))
+
+
+[g,h,K] = extract_can(potA2);
+%assert(approxeq(g(:)', [-0.69315 -1]))
+g
+assert(approxeq(g(:)', [-0.69315 -0.69315]))
+
+
+
+if 0
+pot_type = 'scg';
+spotYgivenA = convert_to_pot(bnet.CPD{Y}, pot_type, [A Y], evidence);
+spotA = convert_to_pot(bnet.CPD{A}, pot_type, A, evidence);
+spotYandA = direct_combine_pots(spotYgivenA, spotA);
+spotA2 = marginalize_pot(spotYandA, A);
+
+spotA=struct(spotA);
+spotA2=struct(spotA2);
+for i=1:2
+ assert(approxeq(spotA2.scgpotc{i}.p, spotA.scgpotc{i}.p))
+ assert(approxeq(spotA2.scgpotc{i}.A, spotA.scgpotc{i}.A))
+ assert(approxeq(spotA2.scgpotc{i}.B, spotA.scgpotc{i}.B))
+ assert(approxeq(spotA2.scgpotc{i}.C, spotA.scgpotc{i}.C))
+end
+
+end
+
+
+%%%%%%%%%%%
+
+function [g,h,K] = extract_can(pot)
+
+pot = struct(pot);
+D = length(pot.can);
+g = zeros(1, D);
+h = zeros(1, D);
+K = zeros(1, D);
+for i=1:D
+ S = struct(pot.can{i});
+ g(i) = S.g;
+ if length(S.h) > 0
+ h(i) = S.h;
+ K(i) = S.K;
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+/bic1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/cooper_yoo.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/k2demo1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mcmc1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/model_select1.m/1.1.1.1/Sat Nov 6 20:55:18 2004//
+/model_select2.m/1.1.1.1/Sat Nov 6 21:52:42 2004//
+/pc1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/pc2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/StructLearn
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/bic1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/bic1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,79 @@
+% compare BIC and Bayesian score
+
+N = 4;
+dag = zeros(N,N);
+%C = 1; S = 2; R = 3; W = 4; % topological order
+C = 4; S = 2; R = 3; W = 1; % arbitrary order
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+bnet = mk_bnet(dag, ns);
+bnet.CPD{C} = tabular_CPD(bnet, C, 'CPT', [0.5 0.5]);
+bnet.CPD{R} = tabular_CPD(bnet, R, 'CPT', [0.8 0.2 0.2 0.8]);
+bnet.CPD{S} = tabular_CPD(bnet, S, 'CPT', [0.5 0.9 0.5 0.1]);
+bnet.CPD{W} = tabular_CPD(bnet, W, 'CPT', [1 0.1 0.1 0.01 0 0.9 0.9 0.99]);
+
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+ncases = 1000;
+data = cell(N, ncases);
+for m=1:ncases
+ data(:,m) = sample_bnet(bnet);
+end
+
+priors = [0.1 1 10];
+P = length(priors);
+params = cell(1,P);
+for p=1:P
+ params{p} = cell(1,N);
+ for i=1:N
+ %params{p}{i} = {'prior', priors(p)};
+ params{p}{i} = {'prior_type', 'dirichlet', 'dirichlet_weight', priors(p)};
+ end
+end
+
+%sz = 1000:1000:10000;
+sz = 10:10:100;
+S = length(sz);
+bic_score = zeros(S, 1);
+bayes_score = zeros(S, P);
+for i=1:S
+ bic_score(i) = score_dags(data(:,1:sz(i)), ns, {dag}, 'scoring_fn', 'bic', 'params', []);
+end
+diff = zeros(S,P);
+for p=1:P
+ for i=1:S
+ bayes_score(i,p) = score_dags(data(:,1:sz(i)), ns, {dag}, 'params', params{p});
+ end
+end
+
+for p=1:P
+ for i=1:S
+ diff(i,p) = bayes_score(i,p)/ bic_score(i);
+ %diff(i,p) = abs(bayes_score(i,p) - bic_score(i));
+ end
+end
+
+if 0
+plot(sz, diff(:,1), 'g--*', sz, diff(:,2), 'b-.+', sz, diff(:,3), 'k:s');
+title('Relative BIC error vs. size of data set')
+legend('BDeu 0.1', 'BDeu 1', 'Bdeu 10', 2)
+end
+
+if 0
+plot(sz, bic_score, 'r-o', sz, bayes_score(:,1), 'g--*', sz, bayes_score(:,2), 'b-.+', sz, bayes_score(:,3), 'k:s');
+legend('bic', 'BDeu 0.01', 'BDeu 1', 'Bdeu 100')
+ylabel('score')
+title('score vs. size of data set')
+end
+
+%xlabel('num. data cases')
+
+%previewfig(gcf, 'format', 'png', 'height', 2, 'color', 'rgb')
+%exportfig(gcf, '/home/cs/murphyk/public_html/Bayes/Figures/bic.png', 'format', 'png', 'height', 2, 'color', 'rgb')
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/cooper_yoo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/cooper_yoo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+% Do the example in Cooper and Yoo, "Causal discovery from a mixture of experimental and
+% observational data", UAI 99, p120
+
+N = 2;
+dag = zeros(N);
+A = 1; B = 2;
+dag(A,B) = 1;
+ns = 2*ones(1,N);
+
+bnet0 = mk_bnet(dag, ns);
+%bnet0.CPD{A} = tabular_CPD(bnet0, A, 'unif', 1);
+bnet0.CPD{A} = tabular_CPD(bnet0, A, 'CPT', 'unif', 'prior_type', 'dirichlet');
+bnet0.CPD{B} = tabular_CPD(bnet0, B, 'CPT', 'unif', 'prior_type', 'dirichlet');
+
+samples = [2 2;
+ 2 1;
+ 2 2;
+ 1 1;
+ 1 2;
+ 2 2;
+ 1 1;
+ 2 2;
+ 1 2;
+ 2 1;
+ 1 1];
+
+clamped = [0 0;
+ 0 0;
+ 0 0;
+ 0 0;
+ 0 0;
+ 1 0;
+ 1 0;
+ 0 1;
+ 0 1;
+ 0 1;
+ 0 1];
+
+nsamples = size(samples, 1);
+
+% sequential version
+LL = 0;
+bnet = bnet0;
+for l=1:nsamples
+ ev = num2cell(samples(l,:)');
+ manip = find(clamped(l,:)');
+ LL = LL + log_marg_lik_complete(bnet, ev, manip);
+ bnet = bayes_update_params(bnet, ev, manip);
+end
+assert(approxeq(exp(LL), 5.97e-7)) % compare with result from UAI paper
+
+
+% batch version
+cases = num2cell(samples');
+LL2 = log_marg_lik_complete(bnet0, cases, clamped');
+bnet2 = bayes_update_params(bnet0, cases, clamped');
+
+assert(approxeq(LL, LL2))
+
+for j=1:N
+ s1 = struct(bnet.CPD{j}); % violate object privacy
+ s2 = struct(bnet2.CPD{j});
+ assert(approxeq(s1.CPT, s2.CPT))
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/k2demo1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/k2demo1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+N = 4;
+dag = zeros(N,N);
+%C = 1; S = 2; R = 3; W = 4;
+C = 4; S = 2; R = 3; W = 1; % arbitrary order
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+bnet = mk_bnet(dag, ns);
+bnet.CPD{C} = tabular_CPD(bnet, C, [0.5 0.5]);
+bnet.CPD{R} = tabular_CPD(bnet, R, [0.8 0.2 0.2 0.8]);
+bnet.CPD{S} = tabular_CPD(bnet, S, [0.5 0.9 0.5 0.1]);
+bnet.CPD{W} = tabular_CPD(bnet, W, [1 0.1 0.1 0.01 0 0.9 0.9 0.99]);
+
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+ncases = 100;
+data = zeros(N, ncases);
+for m=1:ncases
+ data(:,m) = cell2num(sample_bnet(bnet));
+end
+
+order = [C S R W];
+max_fan_in = 2;
+
+%dag2 = learn_struct_K2(data, ns, order, 'max_fan_in', max_fan_in, 'verbose', 'yes');
+
+sz = 5:5:50;
+for i=1:length(sz)
+ dag2 = learn_struct_K2(data(:,1:sz(i)), ns, order, 'max_fan_in', max_fan_in);
+ correct(i) = isequal(dag, dag2);
+end
+correct
+
+for i=1:length(sz)
+ dag3 = learn_struct_K2(data(:,1:sz(i)), ns, order, 'max_fan_in', max_fan_in, 'scoring_fn', 'bic', 'params', []);
+ correct(i) = isequal(dag, dag3);
+end
+correct
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/mcmc1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/mcmc1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+% We compare MCMC structure learning with exhaustive enumeration of all dags.
+
+N = 3;
+%N = 4;
+dag = mk_rnd_dag(N);
+ns = 2*ones(1,N);
+bnet = mk_bnet(dag, ns);
+for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+ncases = 100;
+data = zeros(N, ncases);
+for m=1:ncases
+ data(:,m) = cell2num(sample_bnet(bnet));
+end
+
+dags = mk_all_dags(N);
+score = score_dags(data, ns, dags);
+post = normalise(exp(score));
+
+[sampled_graphs, accept_ratio] = learn_struct_mcmc(data, ns, 'nsamples', 100, 'burnin', 10);
+mcmc_post = mcmc_sample_to_hist(sampled_graphs, dags);
+
+if 0
+ subplot(2,1,1)
+ bar(post)
+ subplot(2,1,2)
+ bar(mcmc_post)
+ print(gcf, '-djpeg', '/home/cs/murphyk/public_html/Bayes/Figures/mcmc_post.jpg')
+
+ clf
+ plot(accept_ratio)
+ print(gcf, '-djpeg', '/home/cs/murphyk/public_html/Bayes/Figures/mcmc_accept.jpg')
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/model_select1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/model_select1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,121 @@
+% Bayesian model selection demo.
+
+% We generate data from the model A->B
+% and compute the posterior prob of all 3 dags on 2 nodes:
+% (1) A B, (2) A <- B , (3) A -> B
+% Models 2 and 3 are Markov equivalent, and therefore indistinguishable from
+% observational data alone.
+% Using the "difficult" params, the true model only gets a higher posterior after 2000 trials!
+% However, using the noisy NOT gate, the true model wins after 12 trials.
+
+% ground truth
+N = 2;
+dag = zeros(N);
+A = 1; B = 2;
+dag(A,B) = 1;
+
+difficult = 0;
+if difficult
+ ntrials = 2000;
+ ns = 3*ones(1,N);
+ true_bnet = mk_bnet(dag, ns);
+ rand('state', 0);
+ temp = 5;
+ for i=1:N
+ %true_bnet.CPD{i} = tabular_CPD(true_bnet, i, temp);
+ true_bnet.CPD{i} = tabular_CPD(true_bnet, i);
+ end
+else
+ ntrials = 25;
+ ns = 2*ones(1,N);
+ true_bnet = mk_bnet(dag, ns);
+ true_bnet.CPD{1} = tabular_CPD(true_bnet, 1, [0.5 0.5]);
+ pfail = 0.1;
+ psucc = 1-pfail;
+ true_bnet.CPD{2} = tabular_CPD(true_bnet, 2, [pfail psucc; psucc pfail]); % NOT gate
+end
+
+G = mk_all_dags(N);
+nhyp = length(G);
+hyp_bnet = cell(1, nhyp);
+for h=1:nhyp
+ hyp_bnet{h} = mk_bnet(G{h}, ns);
+ for i=1:N
+ % We must set the CPTs to the mean of the prior for sequential log_marg_lik to be correct
+ % The BDeu prior is score equivalent, so models 2,3 will be indistinguishable.
+ % The uniform Dirichlet prior is not score equivalent...
+ fam = family(G{h}, i);
+ hyp_bnet{h}.CPD{i}= tabular_CPD(hyp_bnet{h}, i, 'prior_type', 'dirichlet', ...
+ 'CPT', 'unif');
+ end
+end
+prior = normalise(ones(1, nhyp));
+
+% save results before doing sequential updating
+init_hyp_bnet = hyp_bnet;
+init_prior = prior;
+
+
+rand('state', 0);
+hyp_w = zeros(ntrials+1, nhyp);
+hyp_w(1,:) = prior(:)';
+
+data = zeros(N, ntrials);
+
+% First we compute the posteriors sequentially
+
+LL = zeros(1, nhyp);
+ll = zeros(1, nhyp);
+for t=1:ntrials
+ ev = cell2num(sample_bnet(true_bnet));
+ data(:,t) = ev;
+ for i=1:nhyp
+ ll(i) = log_marg_lik_complete(hyp_bnet{i}, ev);
+ hyp_bnet{i} = bayes_update_params(hyp_bnet{i}, ev);
+ end
+ prior = normalise(prior .* exp(ll));
+ LL = LL + ll;
+ hyp_w(t+1,:) = prior;
+end
+
+% Plot posterior model probabilities
+% Red = model 1 (no arcs), blue/green = models 2/3 (1 arc)
+% Blue = model 2 (2->1)
+% Green = model 3 (1->2, "ground truth")
+
+if 1
+ figure;
+m = size(hyp_w, 1);
+h=plot(1:m, hyp_w(:,1), 'r-', 1:m, hyp_w(:,2), 'b-.', 1:m, hyp_w(:,3), 'g:');
+axis([0 m 0 1])
+title('model posterior vs. time')
+%previewfig(gcf, 'format', 'png', 'height', 2, 'color', 'rgb')
+%exportfig(gcf, '/home/cs/murphyk/public_html/Bayes/Figures/model_select.png',...
+%'format', 'png', 'height', 2, 'color', 'rgb')
+drawnow
+end
+
+
+% Now check that batch updating gives same result
+hyp_bnet2 = init_hyp_bnet;
+prior2 = init_prior;
+
+cases = num2cell(data);
+LL2 = zeros(1, nhyp);
+for i=1:nhyp
+ LL2(i) = log_marg_lik_complete(hyp_bnet2{i}, cases);
+ hyp_bnet2{i} = bayes_update_params(hyp_bnet2{i}, cases);
+end
+
+
+assert(approxeq(LL, LL2))
+LL
+
+for i=1:nhyp
+ for j=1:N
+ s1 = struct(hyp_bnet{i}.CPD{j});
+ s2 = struct(hyp_bnet2{i}.CPD{j});
+ assert(approxeq(s1.CPT, s2.CPT))
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/model_select2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/model_select2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,83 @@
+% Online Bayesian model selection demo.
+
+% We generate data from the model A->B
+% and compute the posterior prob of all 3 dags on 2 nodes:
+% (1) A B, (2) A <- B , (3) A -> B
+% Models 2 and 3 are Markov equivalent, and therefore indistinguishable from
+% observational data alone.
+
+% We control the dependence of B on A by setting
+% P(B|A) = 0.5 - epsilon and vary epsilon
+% as in Koller & Friedman book p512
+
+% ground truth
+N = 2;
+dag = zeros(N);
+A = 1; B = 2;
+dag(A,B) = 1;
+
+ntrials = 100;
+ns = 2*ones(1,N);
+true_bnet = mk_bnet(dag, ns);
+true_bnet.CPD{1} = tabular_CPD(true_bnet, 1, [0.5 0.5]);
+
+% hypothesis space
+G = mk_all_dags(N);
+nhyp = length(G);
+hyp_bnet = cell(1, nhyp);
+for h=1:nhyp
+ hyp_bnet{h} = mk_bnet(G{h}, ns);
+ for i=1:N
+ % We must set the CPTs to the mean of the prior for sequential log_marg_lik to be correct
+ % The BDeu prior is score equivalent, so models 2,3 will be indistinguishable.
+ % The uniform Dirichlet prior is not score equivalent...
+ fam = family(G{h}, i);
+ hyp_bnet{h}.CPD{i}= tabular_CPD(hyp_bnet{h}, i, 'prior_type', 'dirichlet', ...
+ 'CPT', 'unif');
+ end
+end
+
+clf
+seeds = 1:3;
+expt = 1;
+for seedi=1:length(seeds)
+ seed = seeds(seedi);
+ rand('state', seed);
+ randn('state', seed);
+
+ es = [0.05 0.1 0.15 0.2];
+ for ei=1:length(es)
+ e = es(ei);
+ true_bnet.CPD{2} = tabular_CPD(true_bnet, 2, [0.5+e 0.5-e; 0.5-e 0.5+e]);
+
+ prior = normalise(ones(1, nhyp));
+ hyp_w = zeros(ntrials+1, nhyp);
+ hyp_w(1,:) = prior(:)';
+ LL = zeros(1, nhyp);
+ ll = zeros(1, nhyp);
+ for t=1:ntrials
+ ev = cell2num(sample_bnet(true_bnet));
+ for i=1:nhyp
+ ll(i) = log_marg_lik_complete(hyp_bnet{i}, ev);
+ hyp_bnet{i} = bayes_update_params(hyp_bnet{i}, ev);
+ end
+ prior = normalise(prior .* exp(ll));
+ LL = LL + ll;
+ hyp_w(t+1,:) = prior;
+ end
+
+ % Plot posterior model probabilities
+ % Red = model 1 (no arcs), blue/green = models 2/3 (1 arc)
+ % Blue = model 2 (2->1)
+ % Green = model 3 (1->2, "ground truth")
+
+ subplot2(length(seeds), length(es), seedi, ei);
+ m = size(hyp_w,1);
+ h=plot(1:m, hyp_w(:,1), 'r-', 1:m, hyp_w(:,2), 'b-.', 1:m, hyp_w(:,3), 'g:');
+ axis([0 m 0 1])
+ %title('model posterior vs. time')
+ title(sprintf('e=%3.2f, seed=%d', e, seed));
+ drawnow
+ expt = expt + 1;
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/pc1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/pc1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+% SGS p118
+% Try learning the structure using an oracle for the cond indep tests
+
+n = 5;
+
+A = 1; B = 2; C = 3; D = 4; E = 5;
+
+G = zeros(n);
+G(A,B)=1;
+G(B,[C D]) = 1;
+G(C,E)=1;
+G(D,E)=1;
+
+k = 2;
+
+pdag = learn_struct_pdag_pc('dsep', n, k, G)
+
+
+
+
+if 0
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+pdag = learn_struct_pdag_pc('dsep', N, 2, dag)
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/pc2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/StructLearn/pc2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+% SGS p141 (female orgasm data set)
+
+C = eye(7,7);
+C(2,1:1) = [-0.132];
+C(3,1:2) = [0.009 -0.136];
+C(4,1:3) = [0.22 -0.166 0.403];
+C(5,1:4) = [-0.008 0.008 0.598 0.282];
+C(6,1:5) = [0.119 -0.076 0.264 0.514 0.176];
+C(7,1:6) = [0.118 -0.137 0.368 0.414 0.336 0.338];
+
+n = 7;
+for i=1:n
+ for j=i+1:n
+ C(i,j)=C(j,i);
+ end
+end
+
+max_fan_in = 4;
+nsamples = 281;
+alpha = 0.05;
+pdag = learn_struct_pdag_pc('cond_indep_fisher_z', n, max_fan_in, C, nsamples, alpha)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+/README/1.1.1.1/Wed May 29 15:59:54 2002//
+/csum.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/ffa.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mfa.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mfa_cl.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mfademo.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/rdiv.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/rprod.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/rsum.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/Zoubin
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,61 @@
+This software was downloaded from
+ http://www.gatsby.ucl.ac.uk/~zoubin/software.html
+with permission of the author.
+
+
+This software was written by
+
+Zoubin Ghahramani
+Dept of Computer Science
+University of Toronto
+zoubin@cs.toronto.edu
+
+This software is written in Matlab 4.2c and should run on all platforms
+supporting this version of Matlab. Matlab is a commercial software
+package available from The MathWorks (http://www.mathworks.com/).
+
+This software is meant for free non-commercial use and distribution. See the
+copyright notice at the bottom of this page.
+
+If you use it, please refer to the accompanying technical report:
+
+Ghahramani, Z. and Hinton, G.E. (1996) The EM Algorithm for Mixtures
+of Factor Analyzers. University of Toronto Technical Report CRG-TR-96-1.
+Available at ftp://ftp.cs.toronto.edu/pub/zoubin/tr-96-1.ps.gz
+
+If you find bugs, or would like to see if I've implemented any
+extensions, please send me email at zoubin@cs.toronto.edu. The
+software is provided "as is", and I cannot guarantee I will be able
+to fix all problems or answer all inquiries.
+
+See mfademo.m for a demo.
+
+Hope you find it useful. Please send me email if you find it useful
+and I will put you on a mailing list announcing releases of other
+statistical machine learning software in Matlab.
+
+
+----------------------------------------------------------------------
+ Copyright (c) 1996 by Zoubin Ghahramani
+ Toronto, Ontario, Canada.
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for non-commercial purposes only is hereby granted
+without fee, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that my name not be used in
+advertising or publicity pertaining to distribution of the software
+without specific, written prior permission. I make no representations
+about the suitability of this software for any purpose. It is provided
+"as is" without express or implied warranty.
+
+I disclaim all warranties with regard to this software, including all
+implied warranties of merchantability and fitness. In no event shall I
+be liable for any special, indirect or consequential damages or any
+damages whatsoever resulting from loss of use, data or profits,
+whether in an action of contract, negligence or other tortious action,
+arising out of or in connection with the use or performance of this
+software.
+
+Zoubin Ghahramani Dec 17, 1996
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/csum.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/csum.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+% column sum
+% function Z=csum(X)
+
+function Z=csum(X)
+
+N=length(X(:,1));
+if (N>1)
+ Z=sum(X);
+else
+ Z=X;
+end;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/ffa.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/ffa.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,75 @@
+% function [L,Ph,LL]=ffa(X,K,cyc,tol);
+%
+% Fast Maximum Likelihood Factor Analysis using EM
+%
+% X - data matrix
+% K - number of factors
+% cyc - maximum number of cycles of EM (default 100)
+% tol - termination tolerance (prop change in likelihood) (default 0.0001)
+%
+% L - factor loadings
+% Ph - diagonal uniquenesses matrix
+% LL - log likelihood curve
+%
+% Iterates until a proportional change < tol in the log likelihood
+% or cyc steps of EM
+%
+
+function [L,Ph,LL]=ffa(X,K,cyc,tol);
+
+if nargin<4 tol=0.0001; end;
+if nargin<3 cyc=100; end;
+
+N=length(X(:,1));
+D=length(X(1,:));
+tiny=exp(-700);
+
+X=X-ones(N,1)*mean(X);
+XX=X'*X/N;
+diagXX=diag(XX);
+
+randn('seed', 0);
+cX=cov(X);
+scale=det(cX)^(1/D);
+L=randn(D,K)*sqrt(scale/K);
+Ph=diag(cX);
+
+I=eye(K);
+
+lik=0; LL=[];
+
+const=-D/2*log(2*pi);
+
+
+for i=1:cyc;
+
+ %%%% E Step %%%%
+ Phd=diag(1./Ph);
+ LP=Phd*L;
+ MM=Phd-LP*inv(I+L'*LP)*LP';
+ dM=sqrt(det(MM));
+ beta=L'*MM;
+ XXbeta=XX*beta';
+ EZZ=I-beta*L +beta*XXbeta;
+
+ %%%% Compute log likelihood %%%%
+
+ oldlik=lik;
+ lik=N*const+N*log(dM)-0.5*N*sum(diag(MM*XX));
+ fprintf('cycle %i lik %g \n',i,lik);
+ LL=[LL lik];
+
+ %%%% M Step %%%%
+
+ L=XXbeta*inv(EZZ);
+ Ph=diagXX-diag(L*XXbeta');
+
+ if (i<=2)
+ likbase=lik;
+ elseif (lik0);
+ end;
+
+ Phmin=exp(-700);
+ Ph=Ph.*(Ph>Phmin)+(Ph<=Phmin)*Phmin; % to avoid zero variances
+
+ % priors
+ Pi=s'/s2;
+
+ end;
+ fprintf('\n');
+end;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/mfa_cl.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/mfa_cl.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,54 @@
+% function [lik, likv]=mfa_cl(X,Lh,Ph,Mu,Pi);
+%
+% Calculates log likelihoods of a data set under a mixture of factor
+% analysis model.
+%
+% X - data matrix
+% Lh - factor loadings
+% Ph - diagonal uniquenesses matrix
+% Mu - mean vectors
+% Pi - priors
+%
+% lik - log likelihood of X
+% likv - vector of log likelihoods
+%
+% If 0 or 1 output arguments requested, lik is returned. If 2 output
+% arguments requested, [lik likv] is returned.
+
+function [lik, likv]=mfa_cl(X,Lh,Ph,Mu,Pi);
+
+N=length(X(:,1));
+D=length(X(1,:));
+K=length(Lh(1,:));
+M=length(Pi);
+
+if (abs(sum(Pi)-1) > 1e-6)
+ disp('ERROR: Pi should sum to 1');
+ return;
+elseif ((size(Lh) ~= [D*M K]) | (size(Ph) ~= [D 1]) | (size(Mu) ~= [M D]) ...
+ | (size(Pi) ~= [M 1] & size(Pi) ~= [1 M]))
+ disp('ERROR in input matrix sizes');
+ return;
+end;
+
+tiny=exp(-744);
+const=(2*pi)^(-D/2);
+
+I=eye(K);
+Phi=1./Ph;
+Phid=diag(Phi);
+for k=1:M
+ Lht=Lh((k-1)*D+1:k*D,:);
+ LP=Phid*Lht;
+ MM=Phid-LP*inv(I+Lht'*LP)*LP';
+ dM=sqrt(det(MM));
+ Xk=(X-ones(N,1)*Mu(k,:));
+ XM=Xk*MM;
+ H(:,k)=const*Pi(k)*dM*exp(-0.5*sum((XM.*Xk)'))';
+end;
+
+Hsum=rsum(H);
+
+likv=log(Hsum+(Hsum==0)*tiny);
+lik=sum(likv);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/mfademo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/mfademo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,81 @@
+echo on;
+
+clc;
+
+% This is a very basic demo of the mixture of factor analyzer software
+% written in Matlab by Zoubin Ghahramani
+% Dept of Computer Science
+% University of Toronto
+
+pause; % Hit any key to continue
+
+% To demonstrate the software we generate a sample data set
+% from a mixture of two Gaussians
+
+pause; % Hit any key to continue
+
+X1=randn(300,5); % zero mean 5 dim Gaussian data
+X2=randn(200,5)+2; % 5 dim Gaussian data with mean [1 1 1 1 1]
+X=[X1;X2]; % total 500 data points from mixture
+
+% Fitting the model is very easy. For example to fit a mixture of 2
+% factor analyzers with three factors each...
+
+pause; % Hit any key to continue
+
+
+[Lh,Ph,Mu,Pi,LL]=mfa(X,2,3);
+
+% Lh, Ph, Mu, and Pi are the factor loadings, observation
+% variances, observation means for each mixture, and mixing
+% proportions. LL is the vector of log likelihoods (the learning
+% curve). For more information type: help mfa
+
+% to plot the learning curve (log likelihood at each step of EM)...
+
+pause; % Hit any key to continue
+
+plot(LL);
+
+% you get a more informative picture of convergence by looking at the
+% log of the first difference of the log likelihoods...
+
+pause; % Hit any key to continue
+
+semilogy(diff(LL));
+
+% you can look at some of the parameters of the fitted model...
+
+pause; % Hit any key to continue
+
+Mu
+
+Pi
+
+% ...to see whether they make any sense given that we know how the
+% data was generated.
+
+% you can also evaluate the log likelihood of another data set under
+% the model we have just fitted using the mfa_cl (for Calculate
+% Likelihood) function. For example, here we generate a test from the
+% same distribution.
+
+
+X1=randn(300,5);
+X2=randn(200,5)+2;
+Xtest=[X1; X2];
+
+pause; % Hit any key to continue
+
+mfa_cl(Xtest,Lh,Ph,Mu,Pi)
+
+% we should expect the log likelihood of the test set to be lower than
+% that of the training set.
+
+% finally, we can also fit a regular factor analyzer using the ffa
+% function (Fast Factor Analysis)...
+
+pause; % Hit any key to continue
+
+[L,Ph,LL]=ffa(X,3);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/rdiv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/Zoubin/rdiv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+% function Z=rdiv(X,Y)
+%
+% row division: Z = X / Y row-wise
+% Y must have one column
+
+function Z=rdiv(X,Y)
+
+[N M]=size(X);
+[K L]=size(Y);
+if(N ~= K | L ~=1)
+ disp('Error in RDIV');
+ return;
+end
+
+Z=zeros(N,M);
+
+if M P(Burglary = false) = 0.999)
+P(Earthquake = true) = 0.002 (=> P(Earthquake = false) = 0.998)
+
+P(Alarm = true | Burglary = true, Earthquake = true) = 0.95
+P(Alarm = true | Burglary = true, Earthquake = false) = 0.94
+P(Alarm = true | Burglary = false, Earthquake = true) = 0.29
+P(Alarm = true | Burglary = false, Earthquake = false) = 0.001
+
+P(JohnCalls = true | Alarm = true) = 0.90
+P(JohnCalls = true | Alarm = false) = 0.05
+
+P(MaryCalls = true | Alarm = true) = 0.70
+P(MaryCalls = true | Alarm = false) = 0.01
+|#
+
+(setf *burglar-alarm-net*
+ '((MaryCalls (true false)
+ (Alarm)
+ ((true) 0.70 0.30)
+ ((false) 0.01 0.99))
+ (JohnCalls (true false)
+ (Alarm)
+ ((true) 0.90 0.10)
+ ((false) 0.05 0.95))
+ (Alarm (true false)
+ (Burglary Earthquake)
+ ((true true) 0.95 0.05)
+ ((true false) 0.94 0.06)
+ ((false true) 0.29 0.71)
+ ((false false) 0.001 0.999))
+ (Burglary (true false)
+ ()
+ (0.001 0.999))
+ (Earthquake (true false)
+ ()
+ (0.002 0.998))
+ ))
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/burglary.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/burglary.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+% Burglar alarm example
+
+N = 5;
+dag = zeros(N,N);
+E = 1; B = 2; R = 3; A = 4; C = 5;
+dag(E,[R A]) = 1;
+dag(B,A) = 1;
+dag(A,C)=1;
+
+% true = state 1, false = state 2
+ns = 2*ones(1,N); % binary nodes
+bnet = mk_bnet(dag, ns);
+
+bnet.CPD{E} = tabular_CPD(bnet, E, [0.1 0.9]);
+bnet.CPD{B} = tabular_CPD(bnet, B, [0.01 0.99]);
+%bnet.CPD{R} = tabular_CPD(bnet, R, [0.65 0.00001 0.35 0.99999]);
+bnet.CPD{R} = tabular_CPD(bnet, R, [0.65 0.01 0.35 0.99]);
+bnet.CPD{A} = tabular_CPD(bnet, A, [0.95 0.8 0.3 0.001 0.05 0.2 0.7 0.999]);
+bnet.CPD{C} = tabular_CPD(bnet, C, [0.7 0.05 0.3 0.95]);
+
+
+engine = jtree_inf_engine(bnet);
+ev = cell(1,N);
+ev{C} = 1;
+engine = enter_evidence(engine, ev);
+mE = marginal_nodes(engine, E);
+mB = marginal_nodes(engine, B);
+fprintf('P(E|c)=%5.3f, P(B|c)=%5.3f\n', mE.T(1), mB.T(1))
+
+ev{C} = 1;
+ev{R} = 1;
+engine = enter_evidence(engine, ev);
+mE = marginal_nodes(engine, E);
+mB = marginal_nodes(engine, B);
+fprintf('P(E|c,r)=%5.3f, P(B|c,r)=%5.3f\n', mE.T(1), mB.T(1))
+
+
+if 0
+nsamples = 100;
+samples = zeros(nsamples, 5);
+for i=1:nsamples
+ samples(i,:) = cell2num(sample_bnet(bnet))';
+end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/cg1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/cg1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+% Conditional Gaussian network
+% The waste incinerator emissions example from Lauritzen (1992),
+% "Propagation of Probabilities, Means and Variances in Mixed Graphical Association Models",
+% JASA 87(420): 1098--1108
+%
+% This example is reprinted on p145 of "Probabilistic Networks and Expert Systems",
+% Cowell, Dawid, Lauritzen and Spiegelhalter, 1999, Springer.
+%
+% For a picture, see http://www.cs.berkeley.edu/~murphyk/Bayes/usage.html#cg_model
+
+ns = 2*ones(1,9);
+%bnet = mk_incinerator_bnet(ns);
+bnet = mk_incinerator_bnet;
+
+engines = {};
+%engines{end+1} = stab_cond_gauss_inf_engine(bnet);
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = cond_gauss_inf_engine(bnet);
+nengines = length(engines);
+
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+dnodes = [B F W];
+cnodes = mysetdiff(1:n, dnodes);
+
+evidence = cell(1,n); % no evidence
+ll = zeros(1, nengines);
+for e=1:nengines
+ [engines{e}, ll(e)] = enter_evidence(engines{e}, evidence);
+end
+%assert(approxeq(ll(1), ll)))
+ll
+
+% Compare to the results in table on p1107.
+% These results are printed to 3dp in Cowell p150
+
+mu = zeros(1,n);
+sigma = zeros(1,n);
+dprob = zeros(1,n);
+addev = 1;
+tol = 1e-2;
+for e=1:nengines
+ for i=cnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ mu(i) = m.mu;
+ sigma(i) = sqrt(m.Sigma);
+ end
+ for i=dnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ dprob(i) = m.T(1);
+ end
+ assert(approxeq(mu([E D C L Min Mout]), [-3.25 3.04 -1.85 1.48 -0.214 2.83], tol))
+ assert(approxeq(sigma([E D C L Min Mout]), [0.709 0.770 0.507 0.631 0.459 0.860], tol))
+ assert(approxeq(dprob([B F W]), [0.85 0.95 0.29], tol))
+ %m = marginal_nodes(engines{e}, bnet.names('E'), addev);
+ %assert(approxeq(m.mu, -3.25, tol))
+ %assert(approxeq(sqrt(m.Sigma), 0.709, tol))
+end
+
+% Add evidence (p 1105, top right)
+evidence = cell(1,n);
+evidence{W} = 1; % industrial
+evidence{L} = 1.1;
+evidence{C} = -0.9;
+
+ll = zeros(1, nengines);
+for e=1:nengines
+ [engines{e}, ll(e)] = enter_evidence(engines{e}, evidence);
+end
+assert(all(approxeq(ll(1), ll)))
+
+for e=1:nengines
+ for i=cnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ mu(i) = m.mu;
+ sigma(i) = sqrt(m.Sigma);
+ end
+ for i=dnodes(:)'
+ m = marginal_nodes(engines{e}, i, addev);
+ dprob(i) = m.T(1);
+ end
+ assert(approxeq(mu([E D C L Min Mout]), [-3.90 3.61 -0.9 1.1 0.5 4.11], tol))
+ assert(approxeq(sigma([E D C L Min Mout]), [0.076 0.326 0 0 0.1 0.344], tol))
+ assert(approxeq(dprob([B F W]), [0.0122 0.9995 1], tol))
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/cg2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/cg2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+% Conditional Gaussian network with vector-valued nodes and random params
+
+ns = 2*ones(1,9);
+bnet = mk_incinerator_bnet(ns);
+
+engines = {};
+%engines{end+1} = stab_cond_gauss_inf_engine(bnet);
+engines{end+1} = jtree_inf_engine(bnet);
+engines{end+1} = cond_gauss_inf_engine(bnet);
+
+[err, time] = cmp_inference_static(bnet, engines, 'singletons_only', 1);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/cmp_inference_static.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/cmp_inference_static.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,114 @@
+function [time, engine] = cmp_inference_static(bnet, engine, varargin)
+% CMP_INFERENCE Compare several inference engines on a BN
+% function [time, engine] = cmp_inference_static(bnet, engine, ...)
+%
+% engine{i} is the i'th inference engine.
+% time(e) = elapsed time for doing inference with engine e
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% exact - specifies which engines do exact inference [ 1:length(engine) ]
+% singletons_only - if 1, we only call marginal_nodes, else this and marginal_family [0]
+% maximize - 1 means we do max-propagation, 0 means sum-propagation [0]
+% check_ll - 1 means we check that the log-likelihoods are correct [1]
+% observed - list of the observed nodes [ bnet.observed ]
+% check_converged - list of loopy engines that should be checked for convergence [ [] ]
+% If an engine has converged, it is added to the exact list.
+
+
+% set default params
+exact = 1:length(engine);
+singletons_only = 0;
+maximize = 0;
+check_ll = 1;
+observed = bnet.observed;
+check_converged = [];
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'exact', exact = args{i+1};
+ case 'singletons_only', singletons_only = args{i+1};
+ case 'maximize', maximize = args{i+1};
+ case 'check_ll', check_ll = args{i+1};
+ case 'observed', observed = args{i+1};
+ case 'check_converged', check_converged = args{i+1};
+ otherwise,
+ error(['unrecognized argument ' args{i}])
+ end
+end
+
+E = length(engine);
+ref = exact(1); % reference
+
+N = length(bnet.dag);
+ev = sample_bnet(bnet);
+evidence = cell(1,N);
+evidence(observed) = ev(observed);
+%celldisp(evidence(observed))
+
+for i=1:E
+ tic;
+ if check_ll
+ [engine{i}, ll(i)] = enter_evidence(engine{i}, evidence, 'maximize', maximize);
+ else
+ engine{i} = enter_evidence(engine{i}, evidence, 'maximize', maximize);
+ end
+ time(i)=toc;
+end
+
+for i=check_converged(:)'
+ niter = loopy_converged(engine{i});
+ if niter > 0
+ fprintf('loopy engine %d converged in %d iterations\n', i, niter);
+% exact = myunion(exact, i);
+ else
+ fprintf('loopy engine %d has not converged\n', i);
+ end
+end
+
+cmp = exact(2:end);
+if check_ll
+ for i=cmp(:)'
+ assert(approxeq(ll(ref), ll(i)));
+ end
+end
+
+hnodes = mysetdiff(1:N, observed);
+
+if ~singletons_only
+ get_marginals(engine, hnodes, exact, 0);
+end
+get_marginals(engine, hnodes, exact, 1);
+
+%%%%%%%%%%
+
+function get_marginals(engine, hnodes, exact, singletons)
+
+bnet = bnet_from_engine(engine{1});
+N = length(bnet.dag);
+cnodes_bitv = zeros(1,N);
+cnodes_bitv(bnet.cnodes) = 1;
+ref = exact(1); % reference
+cmp = exact(2:end);
+E = length(engine);
+
+for n=hnodes(:)'
+ for e=1:E
+ if singletons
+ m{e} = marginal_nodes(engine{e}, n);
+ else
+ m{e} = marginal_family(engine{e}, n);
+ end
+ end
+ for e=cmp(:)'
+ if cnodes_bitv(n)
+ assert(approxeq(m{ref}.mu, m{e}.mu))
+ assert(approxeq(m{ref}.Sigma, m{e}.Sigma))
+ else
+ assert(approxeq(m{ref}.T, m{e}.T))
+ end
+ assert(isequal(m{e}.domain, m{ref}.domain));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/discrete1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/discrete1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+% Compare various inference engines on the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+dnodes = 1:N;
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+onodes = [2 7];
+bnet = mk_bnet(dag, ns, 'observed', onodes);
+% use random params
+for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+query = [3];
+engine = {};
+engine{end+1} = jtree_inf_engine(bnet);
+engine{end+1} = var_elim_inf_engine(bnet);
+%engine{end+1} = global_joint_inf_engine(bnet);
+% global joint is designed for limids because does not normalize
+
+%engine{end+1} = enumerative_inf_engine(bnet);
+%engine{end+1} = jtree_onepass_inf_engine(bnet, query, onodes);
+
+maximize = 0; % jtree_ndx crashes on max-prop
+[err, time] = cmp_inference_static(bnet, engine, 'maximize', maximize);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/discrete2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/discrete2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+% Compare various inference engines on the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+dnodes = 1:N;
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+onodes = [2 4];
+bnet = mk_bnet(dag, ns, 'observed', onodes);
+% use random params
+for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+%USEC = exist('@jtree_C_inf_engine/collect_evidence','file');
+query = [3];
+engine = {};
+engine{end+1} = jtree_inf_engine(bnet);
+engine{end+1} = jtree_sparse_inf_engine(bnet);
+%engine{end+1} = jtree_ndx_inf_engine(bnet, 'ndx_type', 'SD');
+%engine{end+1} = jtree_ndx_inf_engine(bnet, 'ndx_type', 'B');
+%engine{end+1} = jtree_ndx_inf_engine(bnet, 'ndx_type', 'D');
+%if USEC, engine{end+1} = jtree_C_inf_engine(bnet); end
+%engine{end+1} = var_elim_inf_engine(bnet);
+%engine{end+1} = enumerative_inf_engine(bnet);
+%engine{end+1} = jtree_onepass_inf_engine(bnet, query, onodes);
+
+maximize = 0; % jtree_ndx crashes on max-prop
+[err, time] = cmp_inference_static(bnet, engine, 'maximize', maximize);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/discrete3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/discrete3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+% Compare various inference engines on the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+dnodes = 1:N;
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+onodes = [1];
+evidence = cell(1,N);
+evidence(onodes) = num2cell(1);
+bnet = mk_bnet(dag, ns, 'observed', onodes);
+% use random params
+%for i=1:N
+% bnet.CPD{i} = tabular_CPD(bnet, i);
+%end
+bnet.CPD{1} = tabular_CPD(bnet, 1, 'sparse', 1, 'CPT', [0.8, 0.2]);
+bnet.CPD{2} = tabular_CPD(bnet, 2, 'sparse', 1, 'CPT', [1 0 0 1]);
+bnet.CPD{3} = tabular_CPD(bnet, 3, 'sparse', 1, 'CPT', [0 1 1 0]);
+bnet.CPD{4} = tabular_CPD(bnet, 4, 'sparse', 1, 'CPT', [1 1 0 0]);
+bnet.CPD{5} = tabular_CPD(bnet, 5, 'sparse', 1, 'CPT', [0 0 1 1]);
+bnet.CPD{6} = tabular_CPD(bnet, 6, 'sparse', 1, 'CPT', [1 0 0 1]);
+bnet.CPD{7} = tabular_CPD(bnet, 7, 'sparse', 1, 'CPT', [0 1 1 0]);
+bnet.CPD{8} = tabular_CPD(bnet, 8, 'sparse', 1, 'CPT', [1 1 0 0 0 0 1 1]);
+bnet.CPD{9} = tabular_CPD(bnet, 9, 'sparse', 1, 'CPT', [0 1 0 1 1 0 1 0]);
+
+engine = jtree_sparse_inf_engine(bnet);
+tic
+[engine, ll] = enter_evidence(engine, evidence);
+toc
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/test_housing.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/test_restaurants.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/test_zoo1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/tmp.dot/1.1.1.1/Wed May 29 15:59:54 2002//
+/transform_data_into_bnt_format.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/dtree
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/test_housing.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/test_housing.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,87 @@
+% Here the training data is adapted from UCI ML repository, 'housing' data
+% Input variables: 12 continuous, one binary
+% Output variables: continuous
+% The testing result trace is at the end of this script; it is the same as the graph on page 219 of
+% Leo Breiman et al.'s 1984 book titled "Classification and Regression Trees".
+
+dtreeCPD=tree_CPD;
+
+% load data
+fname = fullfile(BNT_HOME, 'examples', 'static', 'uci_data', 'housing', 'housing.data');
+data=load(fname);
+data=data';
+data=transform_data_into_bnt_format(data,[1:3,5:14]);
+
+% learn decision tree from data
+ns=1*ones(1,14);
+ns(4)=2;
+dtreeCPD1=learn_params(dtreeCPD,1:14,data,ns,[1:3,5:14],'stop_cases',5,'min_gain',0.006);
+
+% evaluate on data
+[score,outputs]=evaluate_tree_performance(dtreeCPD1,1:14,data,ns,[1:3,5:14]);
+fprintf('Mean square deviation (using regression tree to predict) in old training data %6.3f\n',score);
+
+
+% show decision tree using graphpad
+% It should be easy, but still not implemented
+
+
+
+% >> test_housing
+% Create node 1 split at 6 gain 38.2205 Th 6.939000e+000. Mean 22.5328 Cases 506
+% Create node 2 split at 13 gain 14.4503 Th 1.437000e+001. Mean 19.9337 Cases 430
+% Create node 3 split at 8 gain 4.9809 Th 1.358000e+000. Mean 23.3498 Cases 255
+% Create node 4 split at 1 gain 0.7722 Th 1.023300e+001. Mean 45.5800 Cases 5
+% Create leaf node(samevalue) 5. Mean 50.0000 Std 0.0000 Cases 4
+% Add subtree node 5 to 4. #nodes 5
+% Create leaf node(samevalue) 6. Mean 27.9000 Std 0.0000 Cases 1
+% Add subtree node 6 to 4. #nodes 6
+% Add subtree node 4 to 3. #nodes 6
+% Create node 7 split at 6 gain 2.8497 Th 6.540000e+000. Mean 22.9052 Cases 250
+% Create node 8 split at 13 gain 0.5970 Th 7.560000e+000. Mean 21.6297 Cases 195
+% Create leaf node(nogain) 9. Mean 23.9698 Std 1.7568 Cases 43
+% Add subtree node 9 to 8. #nodes 9
+% Create leaf node(nogain) 10. Mean 20.9678 Std 2.8242 Cases 152
+% Add subtree node 10 to 8. #nodes 10
+% Add subtree node 8 to 7. #nodes 10
+% Create leaf node(nogain) 11. Mean 27.4273 Std 3.4512 Cases 55
+% Add subtree node 11 to 7. #nodes 11
+% Add subtree node 7 to 3. #nodes 11
+% Add subtree node 3 to 2. #nodes 11
+% Create node 12 split at 1 gain 2.2467 Th 6.962150e+000. Mean 14.9560 Cases 175
+% Create node 13 split at 5 gain 0.5172 Th 5.240000e-001. Mean 17.1376 Cases 101
+% Create leaf node(nogain) 14. Mean 20.0208 Std 3.0672 Cases 24
+% Add subtree node 14 to 13. #nodes 14
+% Create leaf node(nogain) 15. Mean 16.2390 Std 2.9746 Cases 77
+% Add subtree node 15 to 13. #nodes 15
+% Add subtree node 13 to 12. #nodes 15
+% Create node 16 split at 5 gain 0.6133 Th 6.050000e-001. Mean 11.9784 Cases 74
+% Create leaf node(nogain) 17. Mean 16.6333 Std 4.5052 Cases 12
+% Add subtree node 17 to 16. #nodes 17
+% Create leaf node(nogain) 18. Mean 11.0774 Std 3.0090 Cases 62
+% Add subtree node 18 to 16. #nodes 18
+% Add subtree node 16 to 12. #nodes 18
+% Add subtree node 12 to 2. #nodes 18
+% Add subtree node 2 to 1. #nodes 18
+% Create node 19 split at 6 gain 6.0493 Th 7.420000e+000. Mean 37.2382 Cases 76
+% Create node 20 split at 1 gain 1.9900 Th 7.367110e+000. Mean 32.1130 Cases 46
+% Create node 21 split at 8 gain 0.6273 Th 1.877300e+000. Mean 33.3488 Cases 43
+% Create leaf node(samevalue) 22. Mean 45.6500 Std 6.1518 Cases 2
+% Add subtree node 22 to 21. #nodes 22
+% Create leaf node(nogain) 23. Mean 32.7488 Std 3.5690 Cases 41
+% Add subtree node 23 to 21. #nodes 23
+% Add subtree node 21 to 20. #nodes 23
+% Create leaf node(samevalue) 24. Mean 14.4000 Std 3.7363 Cases 3
+% Add subtree node 24 to 20. #nodes 24
+% Add subtree node 20 to 19. #nodes 24
+% Create node 25 split at 1 gain 1.1001 Th 2.733970e+000. Mean 45.0967 Cases 30
+% Create leaf node(nogain) 26. Mean 45.8966 Std 4.4005 Cases 29
+% Add subtree node 26 to 25. #nodes 26
+% Create leaf node(samevalue) 27. Mean 21.9000 Std 0.0000 Cases 1
+% Add subtree node 27 to 25. #nodes 27
+% Add subtree node 25 to 19. #nodes 27
+% Add subtree node 19 to 1. #nodes 27
+% Mean square deviation (using regression tree to predict) in old training data 9.405
+%
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/test_restaurants.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/test_restaurants.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,98 @@
+% Here the training data is adapted from Russell95 book. See restaurant.names for description.
+% (1) Using information-gain as the split testing score, we get the same decision tree as the book Russell 95 (page 537),
+% and the Gain(Patrons) is 0.5409, equal to the result in Page 541 of Russell 95. (see below output trace)
+% (Note: the dtree in that book has a small typographical error: the Type node descends from the YES branch of the Hungry node, not the NO branch.)
+% (2) Using gain-ratio (Quinlan 93), the splitting criterion disfavors attributes with more values (e.g. the Type attribute here).
+
+dtreeCPD=tree_CPD;
+
+% load data
+fname = fullfile(BNT_HOME, 'examples', 'static', 'uci_data', 'restaurant', 'restaurant.data');
+data=load(fname);
+data=data';
+
+%make the data be BNT compliant (values for discrete nodes are from 1-n, here n is the node size)
+ % e.g. if the values are [0 1 6], they must be mapping to [1 2 3]
+%data=transform_data(data,'tmp.dat',[]); %here no cts nodes
+
+% learn decision tree from data
+ns=2*ones(1,11);
+ns(5:6)=3;
+ns(9:10)=4;
+dtreeCPD1=learn_params(dtreeCPD,1:11,data,ns,[]);
+
+% evaluate on data
+[score,outputs]=evaluate_tree_performance(dtreeCPD1,1:11,data,ns,[]);
+fprintf('Accuracy in training data %6.3f\n',score);
+
+% show decision tree using graphpad
+
+
+
+% --------------------------Output trace: using Information-Gain------------------------------
+% The splits are Patron, Hungry, Type, Fri/Sat
+% *********************************
+% Create node 1 split at 5 gain 0.5409 Th 0. Class 1 Cases 12 Error 6
+% Create leaf node(onecla) 2. Class 1 Cases 2 Error 0
+% Add subtree node 2 to 1. #nodes 2
+% Create leaf node(onecla) 3. Class 2 Cases 4 Error 0
+% Add subtree node 3 to 1. #nodes 3
+% Create node 4 split at 4 gain 0.2516 Th 0. Class 1 Cases 6 Error 2
+% Create leaf node(onecla) 5. Class 1 Cases 2 Error 0
+% Add subtree node 5 to 4. #nodes 5
+% Create node 6 split at 9 gain 0.5000 Th 0. Class 1 Cases 4 Error 2
+% Create leaf node(nullset) 7. Father 6 Class 1
+% Create node 8 split at 3 gain 1.0000 Th 0. Class 1 Cases 2 Error 1
+% Create leaf node(onecla) 9. Class 1 Cases 1 Error 0
+% Add subtree node 9 to 8. #nodes 9
+% Create leaf node(onecla) 10. Class 2 Cases 1 Error 0
+% Add subtree node 10 to 8. #nodes 10
+% Add subtree node 8 to 6. #nodes 10
+% Create leaf node(onecla) 11. Class 2 Cases 1 Error 0
+% Add subtree node 11 to 6. #nodes 11
+% Create leaf node(onecla) 12. Class 1 Cases 1 Error 0
+% Add subtree node 12 to 6. #nodes 12
+% Add subtree node 6 to 4. #nodes 12
+% Add subtree node 4 to 1. #nodes 12
+% ********************************
+%
+% Note:
+% ***Create node 4 split at 4 gain 0.2516 Th 0. Class 1 Cases 6 Error 2
+% This means we create a new node, number 4, splitting at attribute 4, with info-gain 0.2516;
+% "Th 0" means the threshold for splitting a continuous attribute, "Class 1" means the majority class at node 4 is 1,
+% and "Cases 6" means it has 6 cases attached to it, "Error 2" means it has two errors if we change the class label of
+% all the cases in it to the majority class.
+% *** Add subtree node 12 to 6. #nodes 12
+% It means we add the child node 12 to node 6.
+% *** Create leaf node(onecla) 10. Class 2 Cases 1 Error 0
+% here 'onecla' means all cases in this node belong to one class, so no need to split further.
+% 'nullset' means no training cases belong to this node, we use its parent node majority class as its class
+%
+%
+%
+% ---------------Output trace: using GainRatio-----------------------
+% The splits are Patron, Hungry, Fri/Sat, Price
+%
+%
+% Create node 1 split at 5 gain 0.3707 Th 0. Class 1 Cases 12 Error 6
+% Create leaf node(onecla) 2. Class 1 Cases 2 Error 0
+% Add subtree node 2 to 1. #nodes 2
+% Create leaf node(onecla) 3. Class 2 Cases 4 Error 0
+% Add subtree node 3 to 1. #nodes 3
+% Create node 4 split at 4 gain 0.2740 Th 0. Class 1 Cases 6 Error 2
+% Create leaf node(onecla) 5. Class 1 Cases 2 Error 0
+% Add subtree node 5 to 4. #nodes 5
+% Create node 6 split at 3 gain 0.3837 Th 0. Class 1 Cases 4 Error 2
+% Create leaf node(onecla) 7. Class 1 Cases 1 Error 0
+% Add subtree node 7 to 6. #nodes 7
+% Create node 8 split at 6 gain 1.0000 Th 0. Class 2 Cases 3 Error 1
+% Create leaf node(onecla) 9. Class 2 Cases 2 Error 0
+% Add subtree node 9 to 8. #nodes 9
+% Create leaf node(nullset) 10. Father 8 Class 2
+% Create leaf node(onecla) 11. Class 1 Cases 1 Error 0
+% Add subtree node 11 to 8. #nodes 11
+% Add subtree node 8 to 6. #nodes 11
+% Add subtree node 6 to 4. #nodes 11
+% Add subtree node 4 to 1. #nodes 11
+%
+%
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/test_zoo1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/test_zoo1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+% Here the training data is adapted from UCI ML repository, 'zoo' data
+
+dtreeCPD=tree_CPD;
+
+% load data
+fname = fullfile(BNT_HOME, 'examples', 'static', 'uci_data', 'zoo', 'zoo1.data')
+data=load(fname);
+data=data';
+
+data=transform_data_into_bnt_format(data, []);
+
+% learn decision tree from data
+ns=2*ones(1,17);
+ns(13)=6;
+ns(17)=7;
+dtreeCPD1=learn_params(dtreeCPD,1:17,data,ns,[],'stop_cases',5); % a node with less than 5 cases will not be splitted
+
+% evaluate on data
+[score,outputs]=evaluate_tree_performance(dtreeCPD1,1:17,data,ns,[]);
+fprintf('Accuracy in old training data %6.3f\n',score);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/tmp.dot
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/tmp.dot Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+digraph G {
+center = 1;
+size="4,4";
+n1 [ label = "1 :" ];
+n2 [ label = "2 :" ];
+n3 [ label = "3 :" ];
+n4 [ label = "4 :" ];
+n5 [ label = "5 :" ];
+n6 [ label = "6 :" ];
+n7 [ label = "7 :" ];
+n8 [ label = "8 :" ];
+n9 [ label = "9 :" ];
+n10 [ label = "10 :" ];
+n1 -> n5 [label="1.000"];
+n2 -> n7 [label="0.800"];
+n2 -> n10 [label="0.200"];
+n3 -> n2 [label="1.000"];
+n4 -> n8 [label="1.000"];
+n5 -> n3 [label="0.143"];
+n5 -> n5 [label="0.571"];
+n5 -> n8 [label="0.286"];
+n6 -> n4 [label="1.000"];
+n7 -> n6 [label="0.333"];
+n7 -> n9 [label="0.667"];
+n8 -> n1 [label="0.333"];
+n8 -> n5 [label="0.333"];
+n8 -> n10 [label="0.333"];
+n9 -> n2 [label="1.000"];
+n10 -> n9 [label="1.000"];
+
+}
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/transform_data_into_bnt_format.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/dtree/transform_data_into_bnt_format.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+function [bnt_data, old_values] = transform_data_into_bnt_format(data,cnodes)
+% TRANSFORM_DATA_INTO_BNT_FORMAT Ensures discrete variables have values 1,2,..,k
+% e.g., if the values of a discrete are [0 1 6], they must be mapped to [1 2 3]
+%
+% data(i,j) is the value for i-th node in j-th case.
+% bnt_data(i,j) is the new value.
+% old_values{i} are the original values for node i.
+% cnodes is the list of all continuous nodes, e.g. [3 5] means the 3rd and 5th nodes are continuous
+%
+% Author: yimin.zhang@intel.com
+% Last updated: Jan. 22, 2002 by Kevin Murphy.
+
+num_nodes=size(data,1);
+num_cases=size(data,2);
+old_values=cell(1,num_nodes);
+
+for i=1:num_nodes
+ if (myismember(i,cnodes)==1) %cts nodes no need to be transformed
+ %just copy the data
+ bnt_data(i,:)=data(i,:);
+ continue;
+ end
+ values = data(i,:);
+ sort_v = sort(values);
+ %remove the duplicate values in sort_v
+ v_set = unique(sort_v);
+
+ %transform the values
+ for j=1:size(values,2)
+ index = binary_search(v_set,values(j));
+ if (index==-1)
+ fprintf('value not found in tranforming data to bnt format.\n');
+ return;
+ end
+ bnt_data(i,j)=index;
+ end
+ old_values{i}=v_set;
+end
+
+
+%%%%%%%%%%%%
+
+function index=binary_search(vector, value)
+% BINARY_SEARCH does binary search for value in the vector
+% Author: yimin.zhang@intel.com
+% Last updated: Jan. 19, 2002
+
+begin_index=1;
+end_index=size(vector,2);
+index=-1;
+while (begin_index<=end_index)
+ mid=floor((begin_index+end_index)/2);
+ if (isstr(vector(mid)))
+ % need to write a strcmp to return three result (< = >)
+ else
+ if (value==vector(mid))
+ index=mid;
+ return;
+ elseif (value>vector(mid))
+ begin_index=mid+1;
+ else
+ end_index=mid-1;
+ end
+ end
+end
+return;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fa1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fa1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+% Factor analysis
+% Z -> X, Z in R^k, X in R^D, k << D (high dimensional observations explained by small source)
+% Z ~ N(0,I), X|Z ~ N(L z, Psi), where Psi is diagonal.
+%
+% We compare to Zoubin Ghahramani's code.
+
+state = 0;
+rand('seed', state);
+randn('seed', state);
+max_iter = 3;
+k = 2;
+D = 4;
+N = 10;
+X = randn(N, D);
+
+% Initialize as in Zoubin's ffa (fast factor analysis)
+X=X-ones(N,1)*mean(X);
+XX=X'*X/N;
+diagXX=diag(XX);
+cX=cov(X);
+scale=det(cX)^(1/D);
+randn('seed', 0); % must reset seed here so initial params are identical to mfa
+L0=randn(D,k)*sqrt(scale/k);
+W0 = L0;
+Psi0=diag(cX);
+
+[L1, Psi1, LL1] = ffa(X,k,max_iter);
+
+
+ns = [k D];
+dag = zeros(2,2);
+dag(1,2) = 1;
+bnet = mk_bnet(dag, ns, 'discrete', [], 'observed', 2);
+bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', zeros(k,1), 'cov', eye(k), 'cov_type', 'diag', ...
+ 'clamp_mean', 1, 'clamp_cov', 1);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(D,1), 'cov', diag(Psi0), 'weights', W0, ...
+ 'cov_type', 'diag', 'cov_prior_weight', 0, 'clamp_mean', 1);
+
+engine = jtree_inf_engine(bnet);
+evidence = cell(2,N);
+evidence(2,:) = num2cell(X', 1);
+
+[bnet2, LL2] = learn_params_em(engine, evidence, max_iter);
+
+s = struct(bnet2.CPD{2});
+L2 = s.weights;
+Psi2 = s.cov;
+
+
+
+% Compare to Zoubin's code
+assert(approxeq(LL2, LL1));
+assert(approxeq(Psi2, diag(Psi1)));
+assert(approxeq(L2, L1));
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/fg1.m/1.1.1.1/Thu Jun 20 00:03:30 2002//
+/fg2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/fg3.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/fg_mrf1.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/fg_mrf2.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/examples/static/fgraph
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,98 @@
+% make an unrolled HMM, convert to factor graph, and check that
+% loopy propagation on the fgraph gives the exact answers.
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+T = 3;
+Q = 3;
+O = 3;
+cts_obs = 0;
+param_tying = 1;
+bnet = mk_hmm_bnet(T, Q, O, cts_obs, param_tying);
+
+data = sample_bnet(bnet);
+
+fgraph = bnet_to_fgraph(bnet);
+big_bnet = fgraph_to_bnet(fgraph);
+% converting factor graph back does not recover the structure of the original bnet
+
+max_iter = 2*T;
+
+engine = {};
+engine{1} = jtree_inf_engine(bnet);
+engine{2} = belprop_inf_engine(bnet, 'max_iter', max_iter);
+engine{3} = belprop_fg_inf_engine(fgraph, 'max_iter', max_iter);
+engine{4} = jtree_inf_engine(big_bnet);
+nengines = length(engine);
+
+big_engine = 4;
+fgraph_engine = 3;
+
+
+N = 2*T;
+evidence = cell(1,N);
+onodes = bnet.observed;
+evidence(onodes) = data(onodes);
+hnodes = mysetdiff(1:N, onodes);
+
+bigN = length(big_bnet.dag);
+big_evidence = cell(1, bigN);
+big_evidence(onodes) = data(onodes);
+big_evidence(N+1:end) = {1}; % factors are observed to be 1
+
+ll = zeros(1, nengines);
+for i=1:nengines
+ if i==big_engine
+ tic; [engine{i}, ll(i)] = enter_evidence(engine{i}, big_evidence); toc
+ else
+ tic; [engine{i}, ll(i)] = enter_evidence(engine{i}, evidence); toc
+ end
+end
+
+% compare all engines to engine{1}
+
+% the log likelihood values may be bogus...
+for i=2:nengines
+ %assert(approxeq(ll(1), ll(i)));
+end
+
+
+marg = zeros(T, nengines, Q); % marg(t,e,:)
+for t=1:T
+ for e=1:nengines
+ m = marginal_nodes(engine{e}, t);
+ marg(t,e,:) = m.T;
+ end
+end
+marg
+
+
+m = cell(nengines, T);
+for i=1:T
+ for e=1:nengines
+ m{e,i} = marginal_nodes(engine{e}, hnodes(i));
+ end
+ for e=2:nengines
+ assert(approxeq(m{e,i}.T, m{1,i}.T));
+ end
+end
+
+mpe = {};
+ll = zeros(1, nengines);
+for e=1:nengines
+ if e==big_engine
+ mpe{e} = find_mpe(engine{e}, big_evidence);
+ mpe{e} = mpe{e}(1:N); % chop off dummy nodes
+ else
+ mpe{e} = find_mpe(engine{e}, evidence);
+ end
+end
+
+% fgraph can't compute loglikelihood for software reasons
+% jtree on the big_bnet gives the wrong ll
+for e=2:nengines
+ %assert(approxeq(ll(1), ll(e)));
+ assert(approxeq(cell2num(mpe{1}), cell2num(mpe{e})))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+% make a factor graph corresponding to an HMM, where we absorb the evidence up front,
+% and then eliminate the observed nodes.
+% Compare this with not absorbing the evidence.
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+T = 3;
+Q = 3;
+O = 2;
+cts_obs = 0;
+param_tying = 1;
+bnet = mk_hmm_bnet(T, Q, O, cts_obs, param_tying);
+N = 2*T;
+onodes = bnet.observed;
+hnodes = mysetdiff(1:N, onodes);
+
+data = sample_bnet(bnet);
+
+init_factor = bnet.CPD{1};
+obs_factor = bnet.CPD{3};
+edge_factor = bnet.CPD{2}; % trans matrix
+
+nfactors = T;
+nvars = T; % hidden only
+G = zeros(nvars, nfactors);
+G(1,1) = 1;
+for t=1:T-1
+ G(t:t+1, t+1)=1;
+end
+
+node_sizes = Q*ones(1,T);
+
+% We tie params as follows:
+% the first hidden node use init_factor (number 1)
+% all hidden nodes on the backbone use edge_factor (number 2)
+% all observed nodes use the same factor, namely obs_factor
+
+small_fg = mk_fgraph_given_ev(G, node_sizes, {init_factor, edge_factor}, {obs_factor}, data(onodes), ...
+ 'equiv_class', [1 2*ones(1,T-1)], 'ev_equiv_class', ones(1,T));
+
+small_bnet = fgraph_to_bnet(small_fg);
+
+% don't pre-process evidence
+big_fg = bnet_to_fgraph(bnet);
+big_bnet = fgraph_to_bnet(big_fg);
+
+
+
+engine = {};
+engine{1} = jtree_inf_engine(bnet);
+engine{2} = belprop_fg_inf_engine(small_fg, 'max_iter', 2*T);
+engine{3} = jtree_inf_engine(small_bnet);
+engine{4} = belprop_fg_inf_engine(big_fg, 'max_iter', 3*T);
+engine{5} = jtree_inf_engine(big_bnet);
+nengines = length(engine);
+
+
+% on BN, use the original evidence
+evidence = cell(1, 2*T);
+evidence(onodes) = data(onodes);
+tic; [engine{1}, ll(1)] = enter_evidence(engine{1}, evidence); toc
+
+
+% on small_fg, we have already included the evidence
+evidence = cell(1,T);
+tic; [engine{2}, ll(2)] = enter_evidence(engine{2}, evidence); toc
+
+
+% on small_bnet, we must add evidence to the dummy nodes
+V = small_fg.nvars;
+dummy = V+1:V+small_fg.nfactors;
+N = max(dummy);
+evidence = cell(1, N);
+evidence(dummy) = {1};
+tic; [engine{3}, ll(3)] = enter_evidence(engine{3}, evidence); toc
+
+
+% on big_fg, use the original evidence
+evidence = cell(1, 2*T);
+evidence(onodes) = data(onodes);
+tic; [engine{4}, ll(4)] = enter_evidence(engine{4}, evidence); toc
+
+
+% on big_bnet, we must add evidence to the dummy nodes
+V = big_fg.nvars;
+assert(V == 2*T);
+dummy = V+1:V+big_fg.nfactors;
+N = max(dummy);
+evidence = cell(1, N);
+evidence(onodes) = data(onodes);
+evidence(dummy) = {1};
+tic; [engine{5}, ll(5)] = enter_evidence(engine{5}, evidence); toc
+
+
+marg = zeros(T, nengines, Q); % marg(t,e,:)
+for t=1:T
+ for e=1:nengines
+ m = marginal_nodes(engine{e}, t);
+ marg(t,e,:) = m.T;
+ end
+end
+marg(:,:,1)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,83 @@
+% make a factor graph corresponding to an HMM with Gaussian outputs, where we absorb the
+% evidence up front
+
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+T = 3;
+Q = 3;
+O = 2;
+cts_obs = 1;
+param_tying = 1;
+bnet = mk_hmm_bnet(T, Q, O, cts_obs, param_tying);
+N = 2*T;
+onodes = bnet.observed;
+hnodes = mysetdiff(1:N, onodes);
+
+data = sample_bnet(bnet);
+
+init_factor = bnet.CPD{1};
+obs_factor = bnet.CPD{3};
+edge_factor = bnet.CPD{2}; % trans matrix
+
+nfactors = T;
+nvars = T; % hidden only
+G = zeros(nvars, nfactors);
+G(1,1) = 1;
+for t=1:T-1
+ G(t:t+1, t+1)=1;
+end
+
+node_sizes = Q*ones(1,T);
+
+% We tie params as follows:
+% the first hidden node use init_factor (number 1)
+% all hidden nodes on the backbone use edge_factor (number 2)
+% all observed nodes use the same factor, namely obs_factor
+
+small_fg = mk_fgraph_given_ev(G, node_sizes, {init_factor, edge_factor}, {obs_factor}, data(onodes), ...
+ 'equiv_class', [1 2*ones(1,T-1)], 'ev_equiv_class', ones(1,T));
+
+small_bnet = fgraph_to_bnet(small_fg);
+
+% don't pre-process evidence
+% big_fg = bnet_to_fgraph(bnet); % can't handle Gaussian node
+
+
+engine = {};
+engine{1} = jtree_inf_engine(bnet);
+engine{2} = belprop_fg_inf_engine(small_fg, 'max_iter', 2*T);
+engine{3} = jtree_inf_engine(small_bnet);
+nengines = length(engine);
+
+
+% on BN, use the original evidence
+evidence = cell(1, 2*T);
+evidence(onodes) = data(onodes);
+tic; [engine{1}, ll(1)] = enter_evidence(engine{1}, evidence); toc
+
+
+% on small_fg, we have already included the evidence
+evidence = cell(1,T);
+tic; [engine{2}, ll(2)] = enter_evidence(engine{2}, evidence); toc
+
+
+% on small_bnet, we must add evidence to the dummy nodes
+V = small_fg.nvars;
+dummy = V+1:V+small_fg.nfactors;
+N = max(dummy);
+evidence = cell(1, N);
+evidence(dummy) = {1};
+tic; [engine{3}, ll(3)] = enter_evidence(engine{3}, evidence); toc
+
+
+
+marg = zeros(T, nengines, Q); % marg(t,e,:)
+for t=1:T
+ for e=1:nengines
+ m = marginal_nodes(engine{e}, t);
+ marg(t,e,:) = m.T;
+ end
+end
+marg(:,:,1)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg_mrf1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg_mrf1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,113 @@
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+nrows = 3;
+ncols = 3;
+npixels = nrows*ncols;
+
+% we number pixels in transposed raster scan order (top to bottom, left to right)
+
+% hidden var
+HV = reshape(1:npixels, nrows, ncols);
+% observed var
+OV = reshape(1:npixels, nrows, ncols) + length(HV(:));
+
+% observed factor
+OF = reshape(1:npixels, nrows, ncols);
+% vertical edge factor VEF(i,j) is the factor for edge HV(i,j) - HV(i+1,j)
+VEF = reshape((1:(nrows-1)*ncols), nrows-1, ncols) + length(OF(:));
+% horizontal edge factor HEF(i,j) is the factor for edge HV(i,j) - HV(i,j+1)
+HEF = reshape((1:nrows*(ncols-1)), nrows, ncols-1) + length(OF(:)) + length(VEF(:));
+
+nvars = length(HV(:))+length(OV(:));
+assert(nvars == 2*npixels);
+nfac = length(OF(:)) + length(VEF(:)) + length(HEF(:));
+
+K = 2; % number of discrete values for the hidden vars
+%O = 1; % each observed pixel is a scalar
+O = 2; % each observed pixel is binary
+
+factors = cell(1,3);
+
+% hidden states generate observed 0 or 1 plus noise
+%factors{2} = cond_gauss1_kernel(K, O, 'mean', [0 1], 'cov', [0.1 0.1]);
+pnoise = 0.2;
+factors{1} = tabular_kernel([K O], [1-pnoise pnoise; pnoise 1-pnoise]);
+ofactor = 1;
+
+% encourage compatibility between neighboring vertical pixels
+factors{2} = tabular_kernel([K K], [0.8 0.2; 0.2 0.8]);
+vedge_factor = 2;
+
+%% no constraint between neighboring horizontal pixels
+%factors{3} = tabular_kernel([K K], [0.5 0.5; 0.5 0.5]);
+
+factors{3} = tabular_kernel([K K], [0.8 0.2; 0.2 0.8]);
+hedge_factor = 3;
+
+
+
+factor_ndx = zeros(1, 3);
+G = zeros(nvars, nfac);
+ns = [K*ones(1,length(HV(:))) O*ones(1,length(OV(:)))];
+
+N = length(ns);
+%cnodes = OV(:);
+cnodes = [];
+dnodes = 1:N;
+
+for i=1:nrows
+ for j=1:ncols
+ G([HV(i,j), OV(i,j)], OF(i,j)) = 1;
+ factor_ndx(OF(i,j)) = ofactor;
+
+ if i < nrows
+ G(HV(i:i+1,j), VEF(i,j)) = 1;
+ factor_ndx(VEF(i,j)) = vedge_factor;
+ end
+
+ if j < ncols
+ G(HV(i,j:j+1), HEF(i,j)) = 1;
+ factor_ndx(HEF(i,j)) = hedge_factor;
+ end
+
+ end
+end
+
+
+fg = mk_fgraph(G, ns, factors, 'discrete', dnodes, 'equiv_class', factor_ndx);
+
+if 1
+ % make image with vertical stripes
+ I = zeros(nrows, ncols);
+ for j=1:2:ncols
+ I(:,j) = 1;
+ end
+else
+ % make image with square in middle
+ I = zeros(nrows, ncols);
+ I(3:6,3:6) = 1;
+end
+
+
+% corrupt image
+O = mod(I + (rand(nrows,ncols)> (1-pnoise)), 2);
+
+maximize = 1;
+engine = belprop_fg_inf_engine(fg, 'maximize', maximize, 'max_iter', npixels*5);
+
+evidence = cell(1, nvars);
+onodes = OV(:);
+evidence(onodes) = num2cell(O+1); % values must be in range {1,2}
+
+engine = enter_evidence(engine, evidence);
+
+for i=1:nrows
+ for j=1:ncols
+ m = marginal_nodes(engine, HV(i,j));
+ Ihat(i,j) = argmax(m.T)-1;
+ end
+end
+
+Ihat
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg_mrf2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg_mrf2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,150 @@
+seed = 0;
+rand('state', seed);
+randn('state', seed);
+
+nrows = 5;
+ncols = 5;
+npixels = nrows*ncols;
+
+% we number pixels in transposed raster scan order (top to bottom, left to right)
+
+% H(i,j) is the number of the hidden node at (i,j)
+H = reshape(1:npixels, nrows, ncols);
+% O(i,j) is the number of the observed node at (i,j)
+O = reshape(1:npixels, nrows, ncols) + length(H(:));
+
+
+% Make a Bayes net where each hidden pixel generates an observed pixel
+% but there are no connections between the hidden pixels.
+% We use this just to generate noisy versions of known images.
+N = 2*npixels;
+dag = zeros(N);
+for i=1:nrows
+ for j=1:ncols
+ dag(H(i,j), O(i,j)) = 1;
+ end
+end
+
+
+K = 2; % number of discrete values for the hidden vars
+ns = ones(N,1);
+ns(H(:)) = K;
+ns(O(:)) = 1;
+
+
+% make image with vertical stripes
+I = zeros(nrows, ncols);
+for j=1:2:ncols
+ I(:,j) = 1;
+end
+
+% each "hidden" node will be instantiated to the pixel in the known image
+% each observed node has conditional Gaussian distribution
+eclass = ones(1,N);
+%eclass(H(:)) = 1;
+%eclass(O(:)) = 2;
+eclass(H(:)) = 1:npixels;
+eclass(O(:)) = npixels+1;
+bnet = mk_bnet(dag, ns, 'discrete', H(:), 'equiv_class', eclass);
+
+
+%bnet.CPD{1} = tabular_CPD(bnet, H(1), 'CPT', normalise(ones(1,K)));
+for i=1:nrows
+ for j=1:ncols
+ bnet.CPD{H(i,j)} = root_CPD(bnet, H(i,j), I(i,j) + 1);
+ end
+end
+
+% If H(i,j)=1, O(i,j)=+1 plus noise
+% If H(i,j)=2, O(i,j)=-1 plus noise
+sigma = 0.5;
+bnet.CPD{eclass(O(1,1))} = gaussian_CPD(bnet, O(1,1), 'mean', [1 -1], 'cov', reshape(sigma*ones(1,K), [1 1 K]));
+ofactor = bnet.CPD{eclass(O(1,1))};
+%ofactor = gaussian_CPD('self', 2, 'dps', 1, 'cps', [], 'sz', [K O], 'mean', [1 -1], 'cov', reshape(sigma*ones(1,K), [1 1 K)));
+
+
+data = sample_bnet(bnet);
+img = reshape(data(O(:)), nrows, ncols)
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Now create MRF represented as a factor graph to try and recover the scene
+
+% VEF(i,j) is the number of the factor for the vertical edge between HV(i,j) - HV(i+1,j)
+VEF = reshape((1:(nrows-1)*ncols), nrows-1, ncols);
+% HEF(i,j) is the number of the factor for the horizontal edge between HV(i,j) - HV(i,j+1)
+HEF = reshape((1:nrows*(ncols-1)), nrows, ncols-1) + length(VEF(:));
+
+nvars = npixels;
+nfac = length(VEF(:)) + length(HEF(:));
+
+G = zeros(nvars, nfac);
+N = length(ns);
+eclass = zeros(1, nfac); % eclass(i)=j means factor i gets its params from factors{j}
+vfactor_ndx = 1; % all vertical edges get their params from factors{1}
+hfactor_ndx = 2; % all horizontal edges get their params from factors{2}
+for i=1:nrows
+ for j=1:ncols
+ if i < nrows
+ G(H(i:i+1,j), VEF(i,j)) = 1;
+ eclass(VEF(i,j)) = vfactor_ndx;
+ end
+ if j < ncols
+ G(H(i,j:j+1), HEF(i,j)) = 1;
+ eclass(HEF(i,j)) = hfactor_ndx;
+ end
+ end
+end
+
+
+% "kitten raised in cage" prior - more likely to see continguous vertical lines
+vfactor = tabular_kernel([K K], softeye(K, 0.9));
+hfactor = tabular_kernel([K K], softeye(K, 0.5));
+factors = cell(1,2);
+factors{vfactor_ndx} = vfactor;
+factors{hfactor_ndx} = hfactor;
+
+ev_eclass = ones(1,N); % every observation factor gets its params from ofactor
+ns = K*ones(1,nvars);
+%fg = mk_fgraph_given_ev(G, ns, factors, {ofactor}, num2cell(img), 'equiv_class', eclass, 'ev_equiv_class', ev_eclass);
+fg = mk_fgraph_given_ev(G, ns, factors, {ofactor}, img, 'equiv_class', eclass, 'ev_equiv_class', ev_eclass);
+
+bnet2 = fgraph_to_bnet(fg);
+
+% inference
+
+
+maximize = 1;
+
+engine = {};
+engine{1} = belprop_fg_inf_engine(fg, 'max_iter', npixels*2);
+engine{2} = jtree_inf_engine(bnet2);
+nengines = length(engine);
+
+% on fg, we have already included the evidence
+evidence = cell(1,npixels);
+tic; [engine{1}, ll(1)] = enter_evidence(engine{1}, evidence, 'maximize', maximize); toc
+
+
+% on bnet2, we must add evidence to the dummy nodes
+V = fg.nvars;
+dummy = V+1:V+fg.nfactors;
+N = max(dummy);
+evidence = cell(1, N);
+evidence(dummy) = {1};
+tic; [engine{2}, ll(2)] = enter_evidence(engine{2}, evidence); toc
+
+
+Ihat = zeros(nrows, ncols, nengines);
+for e=1:nengines
+ for i=1:nrows
+ for j=1:ncols
+ m = marginal_nodes(engine{e}, H(i,j));
+ Ihat(i,j,e) = argmax(m.T)-1;
+ end
+ end
+end
+Ihat
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/gaussian1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/gaussian1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+% Make the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+ns = [5 4 3 2 2 1 2 2 2]; % vector-valued nodes
+%ns = ones(1,9); % scalar nodes
+dnodes = [];
+
+bnet = mk_bnet(dag, ns, 'discrete', []);
+rand('state', 0);
+randn('state', 0);
+for i=1:N
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+end
+
+clear engine;
+engine{1} = gaussian_inf_engine(bnet);
+engine{2} = jtree_inf_engine(bnet);
+
+[err, time] = cmp_inference_static(bnet, engine);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/gaussian2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/gaussian2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+% Make the following network (from Jensen (1996) p84 fig 4.17)
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+ns = [5 4 3 2 2 1 2 2 2]; % vector-valued nodes
+%ns = ones(1,9); % scalar nodes
+dnodes = [];
+
+bnet = mk_bnet(dag, ns, 'discrete', []);
+rand('state', 0);
+randn('state', 0);
+for i=1:N
+ bnet.CPD{i} = gaussian_CPD(bnet, i);
+end
+
+clear engine;
+engine{1} = gaussian_inf_engine(bnet);
+engine{2} = jtree_inf_engine(bnet);
+
+[err, time] = cmp_inference_static(bnet, engine);
+
+Nsamples = 100;
+samples = cell(N, Nsamples);
+for s=1:Nsamples
+ samples(:,s) = sample_bnet(bnet);
+end
+bnet2 = learn_params(bnet, samples);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/gibbs_test1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/gibbs_test1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,107 @@
+function gibbs_test1()
+
+disp('gibbs test 1')
+
+rand('state', 0);
+randn('state', 0);
+
+%[bnet onodes hnodes qnodes] = gibbs_ex_1;
+[bnet onodes hnodes qnodes] = gibbs_ex_2;
+
+je = jtree_inf_engine(bnet);
+ge = gibbs_sampling_inf_engine (bnet, 'T', 50, 'burnin', 0, ...
+ 'order', [2 2 1 2 1]);
+
+ev = sample_bnet(bnet);
+
+evidence = cell(length(bnet.dag), 1);
+evidence(onodes) = ev(onodes);
+[je lj] = enter_evidence(je, evidence);
+[ge lg] = enter_evidence(ge, evidence);
+
+
+mj = marginal_nodes(je, qnodes);
+
+[mg ge] = marginal_nodes (ge, qnodes);
+for t = 1:100
+ [mg ge] = marginal_nodes (ge, qnodes, 'reset_counts', 0);
+ diff = mj.T - mg.T;
+ err(t) = norm (diff(:), 1);
+end
+clf
+plot(err);
+%title('error vs num. Gibbs samples')
+
+
+%%%%%%%
+
+function [bnet, onodes, hnodes, qnodes] = gibbs_ex_1
+% bnet = gibbs_ex_1
+% a simple network to test the gibbs sampling engine
+% 1
+% / | \
+% 2 3 4
+% | | |
+% 5 6 7
+% \/ \/
+% 8 9
+% where all arcs point downwards
+
+N = 9;
+dag = zeros(N,N);
+dag(1,2)=1; dag(1,3)=1; dag(1,4)=1;
+dag(2,5)=1; dag(3,6)=1; dag(4,7)=1;
+dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1;
+
+onodes = 8:9;
+hnodes = 1:7;
+qnodes = [1 2 6];
+ns = [2 3 4 3 5 2 4 3 2];
+
+eclass = [1 2 3 2 4 5 6 7 8];
+
+bnet = mk_bnet (dag, ns, 'equiv_class', eclass);
+
+for i = 1:3
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+for i = 4:8
+ bnet.CPD{i} = tabular_CPD(bnet, i+1);
+end
+
+
+
+%%%%%%%
+
+function [bnet, onodes, hnodes, qnodes] = gibbs_ex_2
+% bnet = gibbs_ex_2
+% a very simple network
+%
+% 1 2
+% \ /
+% 3
+
+N = 3;
+dag = zeros(N,N);
+dag(1,3)=1; dag(2,3)=1;
+
+onodes = 3;
+hnodes = 1:2;
+qnodes = 1:2;
+ns = [2 4 3];
+
+eclass = [1 2 3];
+
+bnet = mk_bnet (dag, ns, 'equiv_class', eclass);
+
+for i = 1:3
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/learn1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/learn1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+% Lawn sprinker example from Russell and Norvig p454
+% See www.cs.berkeley.edu/~murphyk/Bayes/usage.html for details.
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+bnet = mk_bnet(dag, ns);
+bnet.CPD{C} = tabular_CPD(bnet, C, [0.5 0.5]);
+bnet.CPD{R} = tabular_CPD(bnet, R, [0.8 0.2 0.2 0.8]);
+bnet.CPD{S} = tabular_CPD(bnet, S, [0.5 0.9 0.5 0.1]);
+bnet.CPD{W} = tabular_CPD(bnet, W, [1 0.1 0.1 0.01 0 0.9 0.9 0.99]);
+
+CPT = cell(1,N);
+for i=1:N
+ s=struct(bnet.CPD{i}); % violate object privacy
+ CPT{i}=s.CPT;
+end
+
+% Generate training data
+nsamples = 50;
+samples = cell(N, nsamples);
+for i=1:nsamples
+ samples(:,i) = sample_bnet(bnet);
+end
+data = cell2num(samples);
+
+% Make a tabula rasa
+bnet2 = mk_bnet(dag, ns);
+seed = 0;
+rand('state', seed);
+bnet2.CPD{C} = tabular_CPD(bnet2, C, 'clamped', 1, 'CPT', [0.5 0.5], ...
+ 'prior_type', 'dirichlet', 'dirichlet_weight', 0);
+bnet2.CPD{R} = tabular_CPD(bnet2, R, 'prior_type', 'dirichlet', 'dirichlet_weight', 0);
+bnet2.CPD{S} = tabular_CPD(bnet2, S, 'prior_type', 'dirichlet', 'dirichlet_weight', 0);
+bnet2.CPD{W} = tabular_CPD(bnet2, W, 'prior_type', 'dirichlet', 'dirichlet_weight', 0);
+
+
+% Find MLEs from fully observed data
+bnet4 = learn_params(bnet2, samples);
+
+% Bayesian updating with 0 prior is equivalent to ML estimation
+bnet5 = bayes_update_params(bnet2, samples);
+
+CPT4 = cell(1,N);
+for i=1:N
+ s=struct(bnet4.CPD{i}); % violate object privacy
+ CPT4{i}=s.CPT;
+end
+
+CPT5 = cell(1,N);
+for i=1:N
+ s=struct(bnet5.CPD{i}); % violate object privacy
+ CPT5{i}=s.CPT;
+ assert(approxeq(CPT5{i}, CPT4{i}))
+end
+
+
+if 1
+% Find MLEs from partially observed data
+
+% hide 50% of the nodes
+samplesH = samples;
+hide = rand(N, nsamples) > 0.5;
+[I,J]=find(hide);
+for k=1:length(I)
+ samplesH{I(k), J(k)} = [];
+end
+
+engine = jtree_inf_engine(bnet2);
+max_iter = 5;
+[bnet6, LL] = learn_params_em(engine, samplesH, max_iter);
+
+CPT6 = cell(1,N);
+for i=1:N
+ s=struct(bnet6.CPD{i}); % violate object privacy
+ CPT6{i}=s.CPT;
+end
+
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/lw1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/lw1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+% Evaluate effectiveness of likelihood weighting on the lawn sprinkler example
+
+N = 4;
+dag = zeros(N,N);
+C = 1; R = 2; S = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+bnet = mk_bnet(dag, ns);
+bnet.CPD{C} = tabular_CPD(bnet, C, [0.5 0.5]);
+bnet.CPD{R} = tabular_CPD(bnet, R, [0.8 0.2 0.2 0.8]);
+bnet.CPD{S} = tabular_CPD(bnet, S, [0.5 0.9 0.5 0.1]);
+bnet.CPD{W} = tabular_CPD(bnet, W, [1 0.1 0.1 0.01 0 0.9 0.9 0.99]);
+
+
+clear engine;
+engine{1} = jtree_inf_engine(bnet);
+engine{2} = likelihood_weighting_inf_engine(bnet);
+
+nengines = length(engine);
+m = cell(1, nengines);
+ll = zeros(1, nengines);
+
+evidence = cell(1,N);
+%evidence{C} = true; % evidence at the top is the easiest
+evidence{W} = true; % evidence at the bottom is the hardest
+
+query = [R];
+
+i=1;
+engine{i} = enter_evidence(engine{i}, evidence);
+exact_m = marginal_nodes(engine{i}, query);
+
+i=2;
+samples = 100:100:500;
+err = zeros(1, length(samples));
+for j=1:length(samples)
+ nsamples = samples(j);
+ engine{i} = enter_evidence(engine{i}, evidence, nsamples);
+ approx_m = marginal_nodes(engine{i}, query);
+ a1=approxeq(approx_m.T,exact_m.T,1e-1);
+ a2=approxeq(approx_m.T,exact_m.T,1e-2);
+ a3=approxeq(approx_m.T,exact_m.T,1e-3);
+ e = sum(abs(approx_m.T(:) - exact_m.T(:)));
+ fprintf('%d samples, 1dp %d, 2dp %d, 3dp %d, err %f\n', nsamples, a1, a2, a3, e);
+ err(j) = e;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mfa1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mfa1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+% Factor analysis
+% Z -> X, Z in R^k, X in R^D, k << D (high dimensional observations explained by small source)
+% Z ~ N(0,I), X|Z ~ N(L z, Psi), where Psi is diagonal.
+%
+% Mixtures of FA
+% Now X|Z,W=i ~ N(mu(i) + L(i) Z, Psi(i))
+%
+% We compare to Zoubin Ghahramani's code.
+
+randn('state', 0);
+max_iter = 3;
+M = 2;
+k = 3;
+D = 5;
+
+n = 5;
+X1 = randn(n, D);
+X2 = randn(n, D) + 2; % move the mean to (2,2,2...)
+X = [X1; X2];
+N = size(X, 1);
+
+% initialise as in mfa
+tiny=exp(-700);
+mX = mean(X);
+cX=cov(X);
+scale=det(cX)^(1/D);
+randn('state',0); % must reset seed here so initial params are identical to mfa
+L0=randn(D*M,k)*sqrt(scale/k);
+W0 = permute(reshape(L0, [D M k]), [1 3 2]); % use D,K,M
+Psi0=diag(cX)+tiny;
+Pi0=ones(M,1)/M;
+Mu0=randn(M,D)*sqrtm(cX)+ones(M,1)*mX;
+
+[Lh1, Ph1, Mu1, Pi1, LL1] = mfa(X,M,k,max_iter);
+Lh1 = permute(reshape(Lh1, [D M k]), [1 3 2]); % use D,K,M
+
+
+ns = [M k D];
+dag = zeros(3);
+dag(1,3) = 1;
+dag(2,3) = 1;
+dnodes = 1;
+onodes = 3;
+
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', onodes);
+bnet.CPD{1} = tabular_CPD(bnet, 1, Pi0);
+
+%bnet.CPD{2} = gaussian_CPD(bnet, 2, zeros(k, 1), eye(k), [], 'diag', 'untied', 'clamp_mean', 'clamp_cov');
+
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(k, 1), 'cov', eye(k), 'cov_type', 'diag', ...
+ 'cov_prior_weight', 0, 'clamp_mean', 1, 'clamp_cov', 1);
+
+%bnet.CPD{3} = gaussian_CPD(bnet, 3, Mu0', repmat(diag(Psi0), [1 1 M]), W0, 'diag', 'tied');
+
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', Mu0', 'cov', repmat(diag(Psi0), [1 1 M]), ...
+ 'weights', W0, 'cov_type', 'diag', 'cov_prior_weight', 0, 'tied_cov', 1);
+
+engine = jtree_inf_engine(bnet);
+evidence = cell(3, N);
+evidence(3,:) = num2cell(X', 1);
+
+[bnet2, LL2, engine2] = learn_params_em(engine, evidence, max_iter);
+
+s = struct(bnet2.CPD{1});
+Pi2 = s.CPT(:);
+s = struct(bnet2.CPD{3});
+Mu2 = s.mean;
+W2 = s.weights;
+Sigma2 = s.cov;
+
+
+% Compare to Zoubin's code
+assert(approxeq(LL1,LL2));
+for i=1:M
+ assert(approxeq(W2(:,:,i), Lh1(:,:,i)));
+ assert(approxeq(Sigma2(:,:,i), diag(Ph1)));
+ assert(approxeq(Mu2(:,i), Mu1(i,:)));
+ assert(approxeq(Pi2(:), Pi1(:)));
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mixexp1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mixexp1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,72 @@
+% Fit a piece-wise linear regression model.
+% Here is the model
+%
+% X \
+% | |
+% Q |
+% | /
+% Y
+%
+% where all arcs point down.
+% We condition everything on X, so X is a root node. Q is a softmax, and Y is a linear Gaussian.
+% Q is hidden, X and Y are observed.
+
+X = 1;
+Q = 2;
+Y = 3;
+dag = zeros(3,3);
+dag(X,[Q Y]) = 1;
+dag(Q,Y) = 1;
+ns = [1 2 1]; % make X and Y scalars, and have 2 experts
+dnodes = [2];
+onodes = [1 3];
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', onodes);
+
+
+w = [-5 5]; % w(:,i) is the normal vector to the i'th decision boundary
+b = [0 0]; % b(i) is the offset (bias) to the i'th decision boundary
+
+mu = [0 0];
+sigma = 1;
+Sigma = repmat(sigma*eye(ns(Y)), [ns(Y) ns(Y) ns(Q)]);
+W = [-1 1];
+W2 = reshape(W, [ns(Y) ns(X) ns(Q)]);
+
+bnet.CPD{1} = root_CPD(bnet, 1);
+bnet.CPD{2} = softmax_CPD(bnet, 2, w, b);
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', mu, 'cov', Sigma, 'weights', W2);
+
+
+
+% Check inference
+
+x = 0.1;
+ystar = 1;
+
+engine = jtree_inf_engine(bnet);
+[engine, loglik] = enter_evidence(engine, {x, [], ystar});
+Qpost = marginal_nodes(engine, 2);
+
+% eta(i,:) = softmax (gating) params for expert i
+eta = [b' w'];
+
+% theta(i,:) = regression vector for expert i
+theta = [mu' W'];
+
+% yhat(i) = E[y | Q=i, x] = prediction of i'th expert
+x1 = [1 x]';
+yhat = theta * x1;
+
+% gate_prior(i,:) = Pr(Q=i | x)
+gate_prior = normalise(exp(eta * x1));
+
+% cond_lik(i) = Pr(y | Q=i, x)
+cond_lik = (1/(sqrt(2*pi)*sigma)) * exp(-(0.5/sigma^2) * ((ystar - yhat) .* (ystar - yhat)));
+
+% gate_posterior(i,:) = Pr(Q=i | x, y)
+[gate_posterior, lik] = normalise(gate_prior .* cond_lik);
+
+assert(approxeq(gate_posterior(:), Qpost.T(:)));
+assert(approxeq(log(lik), loglik));
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mixexp2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mixexp2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+% Fit a piece-wise linear regression model.
+% Here is the model
+%
+% X \
+% | |
+% Q |
+% | /
+% Y
+%
+% where all arcs point down.
+% We condition everything on X, so X is a root node. Q is a softmax, and Y is a linear Gaussian.
+% Q is hidden, X and Y are observed.
+
+X = 1;
+Q = 2;
+Y = 3;
+dag = zeros(3,3);
+dag(X,[Q Y]) = 1;
+dag(Q,Y) = 1;
+ns = [1 2 1]; % make X and Y scalars, and have 2 experts
+dnodes = [2];
+onodes = [1 3];
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', onodes);
+
+IRLS_iter = 10;
+clamped = 0;
+
+bnet.CPD{1} = root_CPD(bnet, 1);
+
+if 0
+ % start with good initial params
+ w = [-5 5]; % w(:,i) is the normal vector to the i'th decision boundary
+ b = [0 0]; % b(i) is the offset (bias) to the i'th decision boundary
+
+ mu = [0 0];
+ sigma = 1;
+ Sigma = repmat(sigma*eye(ns(Y)), [ns(Y) ns(Y) ns(Q)]);
+ W = [-1 1];
+ W2 = reshape(W, [ns(Y) ns(X) ns(Q)]);
+
+ bnet.CPD{2} = softmax_CPD(bnet, 2, w, b, clamped, IRLS_iter);
+ bnet.CPD{3} = gaussian_CPD(bnet, 3, mu, Sigma, W2);
+else
+ % start with rnd initial params
+ rand('state', 0);
+ randn('state', 0);
+ bnet.CPD{2} = softmax_CPD(bnet, 2, 'clamped', clamped, 'max_iter', IRLS_iter);
+ bnet.CPD{3} = gaussian_CPD(bnet, 3);
+end
+
+
+
+load('/examples/static/Misc/mixexp_data.txt', '-ascii'); % NOTE(review): absolute path from filesystem root looks wrong -- presumably should be relative to the BNT examples directory; confirm
+% Just use 1/10th of the data, to speed things up
+data = mixexp_data(1:10:end, :);
+%data = mixexp_data;
+
+%plot(data(:,1), data(:,2), '.')
+
+
+s = struct(bnet.CPD{2}); % violate object privacy
+%eta0 = [s.glim.b1; s.glim.w1]';
+eta0 = [s.glim{1}.b1; s.glim{1}.w1]';
+s = struct(bnet.CPD{3}); % violate object privacy
+W = reshape(s.weights, [1 2]);
+theta0 = [s.mean; W]';
+
+%figure(1)
+%mixexp_plot(theta0, eta0, data);
+%suptitle('before learning')
+
+ncases = size(data, 1);
+cases = cell(3, ncases);
+cases([1 3], :) = num2cell(data');
+
+engine = jtree_inf_engine(bnet);
+
+% log lik before learning
+ll = 0;
+for l=1:ncases
+ ev = cases(:,l);
+ [engine, loglik] = enter_evidence(engine, ev);
+ ll = ll + loglik;
+end
+
+% do learning
+max_iter = 5;
+[bnet2, LL2] = learn_params_em(engine, cases, max_iter);
+
+s = struct(bnet2.CPD{2});
+%eta2 = [s.glim.b1; s.glim.w1]';
+eta2 = [s.glim{1}.b1; s.glim{1}.w1]';
+s = struct(bnet2.CPD{3});
+W = reshape(s.weights, [1 2]);
+theta2 = [s.mean; W]';
+
+%figure(2)
+%mixexp_plot(theta2, eta2, data);
+%suptitle('after learning')
+
+fprintf('mixexp2: loglik before learning %f, after %d iters %f\n', ll, length(LL2), LL2(end));
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mixexp3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mixexp3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+% Fit a piece-wise linear regression model.
+% Here is the model
+%
+% X \
+% | |
+% Q |
+% | /
+% Y
+%
+% where all arcs point down.
+% We condition everything on X, so X is a root node. Q is a softmax, and Y is a linear Gaussian.
+% Q is hidden, X and Y are observed.
+
+X = 1;
+Q = 2;
+Y = 3;
+dag = zeros(3,3);
+dag(X,[Q Y]) = 1;
+dag(Q,Y) = 1;
+ns = [1 2 1]; % make X and Y scalars, and have 2 experts
+dnodes = [2];
+onodes = [1 3];
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', onodes);
+
+IRLS_iter = 10;
+clamped = 0;
+
+bnet.CPD{1} = root_CPD(bnet, 1);
+
+% start with good initial params
+w = [-5 5]; % w(:,i) is the normal vector to the i'th decision boundary
+b = [0 0]; % b(i) is the offset (bias) to the i'th decision boundary
+
+mu = [0 0];
+sigma = 1;
+Sigma = repmat(sigma*eye(ns(Y)), [ns(Y) ns(Y) ns(Q)]);
+W = [-1 1];
+W2 = reshape(W, [ns(Y) ns(X) ns(Q)]);
+
+bnet.CPD{2} = softmax_CPD(bnet, 2, w, b, clamped, IRLS_iter);
+bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', mu, 'cov', Sigma, 'weights', W2);
+
+
+engine = jtree_inf_engine(bnet);
+
+evidence = cell(1,3);
+evidence{X} = 0.68;
+
+engine = enter_evidence(engine, evidence);
+
+m = marginal_nodes(engine, Y);
+m.mu
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mog1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mog1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,81 @@
+% Fit a mixture of Gaussians using netlab and BNT
+
+rand('state', 0);
+randn('state', 0);
+
+% Q -> Y
+ncenters = 2; dim = 2;
+cov_type = 'full';
+
+% Generate the data from a mixture of 2 Gaussians
+%mu = randn(dim, ncenters);
+mu = zeros(dim, ncenters);
+mu(:,1) = [-1 -1]';
+mu(:,2) = [1 1]'; % FIX: was mu(:,1), which overwrote the first mean and left mu(:,2) at zero
+Sigma = repmat(0.1*eye(dim),[1 1 ncenters]);
+ndat1 = 8; ndat2 = 8;
+%ndat1 = 2; ndat2 = 2;
+ndata = ndat1+ndat2;
+x1 = gsamp(mu(:,1), Sigma(:,:,1), ndat1);
+x2 = gsamp(mu(:,2), Sigma(:,:,2), ndat2);
+data = [x1; x2];
+%plot(x1(:,1),x1(:,2),'ro', x2(:,1),x2(:,2),'bx')
+
+% Fit using netlab
+max_iter = 3;
+mix = gmm(dim, ncenters, cov_type);
+options = foptions;
+options(1) = 1; % verbose
+options(14) = max_iter;
+
+% extract initial params
+%mix = gmminit(mix, x, options); % Initialize with K-means
+mu0 = mix.centres';
+pi0 = mix.priors(:);
+Sigma0 = mix.covars; % repmat(eye(dim), [1 1 ncenters]);
+
+[mix, options] = gmmem(mix, data, options);
+
+% Final params
+ll1 = options(8); % final negative log likelihood is stored in options(8) by gmmem
+mu1 = mix.centres';
+pi1 = mix.priors(:);
+Sigma1 = mix.covars;
+
+
+
+
+% BNT: same mixture as a 2-node bnet, Q (discrete) -> Y (Gaussian)
+
+dag = zeros(2);
+dag(1,2) = 1;
+node_sizes = [ncenters dim];
+discrete_nodes = 1;
+onodes = 2;
+
+bnet = mk_bnet(dag, node_sizes, 'discrete', discrete_nodes, 'observed', onodes);
+bnet.CPD{1} = tabular_CPD(bnet, 1, pi0);
+bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', mu0, 'cov', Sigma0, 'cov_type', cov_type, ...
+    'cov_prior_weight', 0);
+
+engine = jtree_inf_engine(bnet);
+
+evidence = cell(2, ndata);
+evidence(2,:) = num2cell(data', 1);
+
+[bnet2, LL] = learn_params_em(engine, evidence, max_iter);
+
+ll2 = LL(end);
+s1 = struct(bnet2.CPD{1}); % violate object privacy to read learned params
+pi2 = s1.CPT(:);
+
+s2 = struct(bnet2.CPD{2});
+mu2 = s2.mean;
+Sigma2 = s2.cov;
+
+% assert(approxeq(ll1, ll2)); % gmmem returns the value after the final M step, GMT before
+assert(approxeq(mu1, mu2));
+assert(approxeq(Sigma1, Sigma2))
+assert(approxeq(pi1, pi2))
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mpe1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mpe1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+seed = 1;
+rand('state', seed);
+randn('state', seed);
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+bnet = mk_bnet(dag, ns);
+if 0
+ bnet.CPD{C} = tabular_CPD(bnet, C, [0.5 0.5]);
+ bnet.CPD{R} = tabular_CPD(bnet, R, [0.8 0.2 0.2 0.8]);
+ bnet.CPD{S} = tabular_CPD(bnet, S, [0.5 0.9 0.5 0.1]);
+ bnet.CPD{W} = tabular_CPD(bnet, W, [1 0.1 0.1 0.01 0 0.9 0.9 0.99]);
+else
+ for i=1:N, bnet.CPD{i} = tabular_CPD(bnet, i); end
+end
+
+
+
+evidence = cell(1,N);
+onodes = [1 3];
+data = sample_bnet(bnet);
+evidence(onodes) = data(onodes);
+
+clear engine;
+engine{1} = belprop_inf_engine(bnet);
+engine{2} = jtree_inf_engine(bnet);
+engine{3} = global_joint_inf_engine(bnet);
+engine{4} = var_elim_inf_engine(bnet);
+E = length(engine);
+
+clear mpe;
+for e=1:E
+ mpe{e} = find_mpe(engine{e}, evidence);
+end
+for e=2:E
+ assert(isequal(mpe{1}, mpe{e}))
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/mpe2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/mpe2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+% Computing most probable explanation.
+
+% If you don't break ties consistently, loopy can give wrong mpe
+% even though the graph has no cycles, and even though the max-marginals are the same.
+% This example was contributed by Wentau Yih 29 Jan 02.
+
+% define loop-free graph structure (all edges point down)
+%
+% Xe1 Xe2
+% | |
+% E1 E2
+% \ /
+% R1
+% |
+% Xr1
+
+N = 6;
+dag = zeros(N,N);
+Xe1 = 1; Xe2 = 2; E1 = 3; E2 = 4; R1 = 5; Xr1 = 6;
+dag(Xe1, E1) = 1;
+dag(Xe2, E2) = 1;
+dag([E1 E2], R1) = 1;
+dag(R1, Xr1) = 1;
+
+node_sizes = [ 1 1 2 2 2 1 ];
+
+% create BN
+
+bnet = mk_bnet(dag, node_sizes, 'observed', [Xe1 Xe2 Xr1]);
+
+% fill in CPT
+
+bnet.CPD{Xe1} = tabular_CPD(bnet, Xe1, [1]);
+bnet.CPD{Xe2} = tabular_CPD(bnet, Xe2, [1]);
+bnet.CPD{E1} = tabular_CPD(bnet, E1, [0.2 0.8]);
+bnet.CPD{E2} = tabular_CPD(bnet, E2, [0.3 0.7]);
+bnet.CPD{R1} = tabular_CPD(bnet, R1, [1 1 1 0.8 0 0 0 0.2]);
+bnet.CPD{Xr1} = tabular_CPD(bnet, Xr1, [0.15 0.85]);
+
+clear engine;
+engine{1} = belprop_inf_engine(bnet);
+engine{2} = jtree_inf_engine(bnet);
+engine{3} = global_joint_inf_engine(bnet);
+engine{4} = var_elim_inf_engine(bnet);
+
+evidence = cell(1,N);
+evidence{Xe1} = 1; evidence{Xe2} = 1; evidence{Xr1} = 1;
+
+mpe = find_mpe(engine{1}, evidence, 'break_ties', 0) % gives wrong results
+mpe = find_mpe(engine{1}, evidence)
+for i=2:4
+ mpe = find_mpe(engine{i}, evidence)
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/nodeorderExample.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/nodeorderExample.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+% example to illustrate why nodes must be numbered topologically.
+% Due to Shinya OHTANI
+% 9 June 2004
+
+%%%%%%%%% WRONG RESULTS because 2 -> 1
+% should have P(parent|no evidence) = prior = [0.3 0.7]
+
+node = struct('ChildNode', 1, ...
+ 'ParentNode', 2);
+
+adjacency = zeros(2);
+adjacency([node.ParentNode], node.ChildNode) = 1;
+
+value = {{'TRUE'; 'FALSE'}, ...
+ {'TRUE'; 'FALSE'}};
+
+bnet = mk_bnet(adjacency, [2 2]);
+bnet.CPD{node.ChildNode} = tabular_CPD(bnet, node.ChildNode, [0.2 0.4 0.8 0.6]);
+bnet.CPD{node.ParentNode} = tabular_CPD(bnet, node.ParentNode, [0.3 0.7]);
+
+evidence = cell(1,2);
+% evidence{node.ChildNode} = 1;
+% evidence{node.ParentNode} = 1;
+
+engine = jtree_inf_engine(bnet);
+[engine, loglik] = enter_evidence(engine, evidence);
+
+
+marg = marginal_nodes(engine, node.ChildNode);
+disp(sprintf(' ChildNode : %8.6f %8.6f',marg.T(1),marg.T(2)) );
+marg = marginal_nodes(engine, node.ParentNode);
+disp(sprintf(' ParentNode : %8.6f %8.6f',marg.T(1),marg.T(2)) );
+
+%
+% ChildNode : 0.534483 0.465517
+% ParentNode : 0.155172 0.844828
+% loglik = 0.15
+
+
+
+%%%%%%%%% RIGHT RESULTS because 1 -> 2
+
+node = struct('ChildNode', 2, ...
+ 'ParentNode', 1);
+
+
+adjacency = zeros(2);
+adjacency([node.ParentNode], node.ChildNode) = 1;
+
+value = {{'TRUE'; 'FALSE'}, ...
+ {'TRUE'; 'FALSE'}};
+
+bnet = mk_bnet(adjacency, [2 2]);
+bnet.CPD{node.ChildNode} = tabular_CPD(bnet, node.ChildNode, [0.2 0.4 0.8 0.6]);
+bnet.CPD{node.ParentNode} = tabular_CPD(bnet, node.ParentNode, [0.3 0.7]);
+
+evidence = cell(1,2);
+% evidence{node.ChildNode} = 1;
+% evidence{node.ParentNode} = 1;
+
+engine = jtree_inf_engine(bnet);
+[engine, loglik] = enter_evidence(engine, evidence);
+
+
+marg = marginal_nodes(engine, node.ChildNode);
+disp(sprintf(' ChildNode : %8.6f %8.6f',marg.T(1),marg.T(2)) );
+marg = marginal_nodes(engine, node.ParentNode);
+disp(sprintf(' ParentNode : %8.6f %8.6f',marg.T(1),marg.T(2)) );
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/qmr1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/qmr1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,112 @@
+% Make a QMR-like network
+% This is a bipartite graph, where the top layer contains hidden disease nodes,
+% and the bottom later contains observed finding nodes.
+% The diseases have Bernoulli CPDs, the findings noisy-or CPDs.
+% See quickscore_inf_engine for references.
+
+pMax = 0.01;
+Nfindings = 10;
+Ndiseases = 5;
+%Nfindings = 20;
+%Ndiseases = 10;
+
+N=Nfindings+Ndiseases;
+findings = Ndiseases+1:N;
+diseases = 1:Ndiseases;
+
+G = zeros(Ndiseases, Nfindings);
+for i=1:Nfindings
+ v= rand(1,Ndiseases);
+ rents = find(v<0.8);
+ if (length(rents)==0)
+ rents=ceil(rand(1)*Ndiseases);
+ end
+ G(rents,i)=1;
+end
+
+prior = pMax*rand(1,Ndiseases);
+leak = 0.5*rand(1,Nfindings); % in real QMR, leak approx exp(-0.02) = 0.98
+%leak = ones(1,Nfindings); % turns off leaks, which makes inference much harder
+inhibit = rand(Ndiseases, Nfindings);
+inhibit(not(G)) = 1;
+
+
+% first half of findings are +ve, second half -ve
+% The very first and last findings are hidden
+pos = 2:floor(Nfindings/2);
+neg = (pos(end)+1):(Nfindings-1);
+
+% Make the bnet in the straightforward way
+tabular_leaves = 0;
+obs_nodes = myunion(pos, neg) + Ndiseases;
+big_bnet = mk_qmr_bnet(G, inhibit, leak, prior, tabular_leaves, obs_nodes);
+big_evidence = cell(1, N);
+big_evidence(findings(pos)) = num2cell(repmat(2, 1, length(pos)));
+big_evidence(findings(neg)) = num2cell(repmat(1, 1, length(neg)));
+
+%clf;draw_layout(big_bnet.dag);
+%filename = '../public_html/Bayes/Figures/qmr.rnd.jpg';
+%% 3x3 inches
+%set(gcf,'units','inches');
+%set(gcf,'PaperPosition',[0 0 3 3])
+%print(gcf,'-djpeg','-r100',filename);
+
+
+% Marginalize out hidden leaves apriori
+positive_leaves_only = 1;
+[bnet, vals] = mk_minimal_qmr_bnet(G, inhibit, leak, prior, pos, neg, positive_leaves_only);
+obs_nodes = bnet.observed;
+evidence = cell(1, Ndiseases + length(obs_nodes));
+evidence(obs_nodes) = num2cell(vals);
+
+
+clear engine;
+engine{1} = quickscore_inf_engine(inhibit, leak, prior);
+engine{2} = jtree_inf_engine(big_bnet);
+engine{3} = jtree_inf_engine(bnet);
+
+%fname = '/home/cs/murphyk/matlab/Misc/loopybel.txt';
+global BNT_HOME
+fname = sprintf('%s/loopybel.txt', BNT_HOME);
+
+
+max_iter = 6;
+engine{4} = pearl_inf_engine(bnet, 'protocol', 'parallel', 'max_iter', max_iter);
+%engine{5} = belprop_inf_engine(bnet, 'max_iter', max_iter, 'filename', fname);
+engine{5} = belprop_inf_engine(bnet, 'max_iter', max_iter);
+
+E = length(engine);
+exact = 1:3;
+loopy = [4 5];
+
+ll = zeros(1,E);
+tic; engine{1} = enter_evidence(engine{1}, pos, neg); toc
+tic; [engine{2}, ll(2)] = enter_evidence(engine{2}, big_evidence); toc
+tic; [engine{3}, ll(3)] = enter_evidence(engine{3}, evidence); toc
+tic; [engine{4}, ll(4), niter(4)] = enter_evidence(engine{4}, evidence); toc
+tic; [engine{5}, niter(5)] = enter_evidence(engine{5}, evidence); toc
+
+ll
+
+post = zeros(E, Ndiseases);
+for e=1:E
+ for i=diseases(:)'
+ m = marginal_nodes(engine{e}, i);
+ post(e, i) = m.T(2);
+ end
+end
+
+for e=exact(:)'
+ for i=diseases(:)'
+ assert(approxeq(post(1, i), post(e, i)));
+ end
+end
+
+a = zeros(Ndiseases, 2);
+for ei=1:length(loopy)
+ for i=diseases(:)'
+ a(i,ei) = approxeq(post(1, i), post(loopy(ei), i));
+ end
+end
+disp('is the loopy posterior correct?');
+disp(a)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/qmr2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/qmr2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,77 @@
+% Test jtree_compiled on a toy QMR network.
+
+rand('state', 0);
+randn('state', 0);
+pMax = 0.01;
+Nfindings = 10;
+Ndiseases = 5;
+
+N=Nfindings+Ndiseases;
+findings = Ndiseases+1:N;
+diseases = 1:Ndiseases;
+
+G = zeros(Ndiseases, Nfindings);
+for i=1:Nfindings
+ v= rand(1,Ndiseases);
+ rents = find(v<0.8);
+ if (length(rents)==0)
+ rents=ceil(rand(1)*Ndiseases);
+ end
+ G(rents,i)=1;
+end
+
+prior = pMax*rand(1,Ndiseases);
+leak = 0.5*rand(1,Nfindings); % in real QMR, leak approx exp(-0.02) = 0.98
+%leak = ones(1,Nfindings); % turns off leaks, which makes inference much harder
+inhibit = rand(Ndiseases, Nfindings);
+inhibit(not(G)) = 1;
+
+% first half of findings are +ve, second half -ve
+% The very first and last findings are hidden
+pos = 2:floor(Nfindings/2);
+neg = (pos(end)+1):(Nfindings-1);
+
+big = 1;
+
+if big
+ % Make the bnet in the straightforward way
+ tabular_leaves = 1;
+ obs_nodes = myunion(pos, neg) + Ndiseases;
+ bnet = mk_qmr_bnet(G, inhibit, leak, prior, tabular_leaves, obs_nodes);
+ evidence = cell(1, N);
+ evidence(findings(pos)) = num2cell(repmat(2, 1, length(pos)));
+ evidence(findings(neg)) = num2cell(repmat(1, 1, length(neg)));
+else
+ % Marginalize out hidden leaves apriori
+ positive_leaves_only = 1;
+ [bnet, vals] = mk_minimal_qmr_bnet(G, inhibit, leak, prior, pos, neg, positive_leaves_only);
+ obs_nodes = bnet.observed;
+ evidence = cell(1, Ndiseases + length(obs_nodes));
+ evidence(obs_nodes) = num2cell(vals);
+end
+
+engine = {};
+engine{end+1} = jtree_inf_engine(bnet);
+
+E = length(engine);
+exact = 1:E;
+ll = zeros(1,E);
+for e=1:E
+ tic; [engine{e}, ll(e)] = enter_evidence(engine{e}, evidence); toc
+end
+
+assert(all(approxeq(ll(exact), ll(exact(1)))))
+
+post = zeros(E, Ndiseases);
+for e=1:E
+ for i=diseases(:)'
+ m = marginal_nodes(engine{e}, i);
+ post(e, i) = m.T(2);
+ end
+end
+for e=exact(:)'
+ for i=diseases(:)'
+ assert(approxeq(post(1, i), post(e, i)));
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/sample1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/sample1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+% Check sampling on a mixture of experts model
+%
+% X \
+% | |
+% Q |
+% | /
+% Y
+%
+% where all arcs point down.
+% We condition everything on X, so X is a root node. Q is a softmax, and Y is a linear Gaussian.
+% Q is hidden, X and Y are observed.
+
+X = 1;
+Q = 2;
+Y = 3;
+dag = zeros(3,3);
+dag(X,[Q Y]) = 1;
+dag(Q,Y) = 1;
+ns = [1 2 2];
+dnodes = [2];
+bnet = mk_bnet(dag, ns, 'discrete', dnodes); % FIX: named-arg syntax, consistent with the other examples (old positional form is deprecated)
+
+x = 0.5;
+bnet.CPD{1} = root_CPD(bnet, 1, x);
+bnet.CPD{2} = softmax_CPD(bnet, 2);
+bnet.CPD{3} = gaussian_CPD(bnet, 3);
+
+data_case = sample_bnet(bnet, 'evidence', {0.8, [], []})
+ll = log_lik_complete(bnet, data_case)
+
+data_case = sample_bnet(bnet, 'evidence', {-11, [], []})
+ll = log_lik_complete(bnet, data_case)
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/softev1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/softev1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+% Check that adding soft evidence to a hidden node is equivalent to evaluating its leaf CPD.
+
+% Make an HMM
+T = 3; Q = 2; O = 2; cts_obs = 0; param_tying = 0;
+bnet = mk_hmm_bnet(T, Q, O, cts_obs, param_tying);
+N = 2*T;
+onodes = bnet.observed;
+hnodes = mysetdiff(1:N, onodes);
+for i=1:N
+ bnet.CPD{i} = tabular_CPD(bnet, i);
+end
+
+ev = sample_bnet(bnet);
+evidence = cell(1,N);
+evidence(onodes) = ev(onodes);
+
+engine = jtree_inf_engine(bnet);
+
+[engine, ll] = enter_evidence(engine, evidence);
+query = 1;
+m = marginal_nodes(engine, query);
+
+
+% Make a Markov chain with the same backbone
+bnet2 = mk_markov_chain_bnet(T, Q);
+for i=1:T
+ S = struct(bnet.CPD{hnodes(i)}); % violate object privacy
+ bnet2.CPD{i} = tabular_CPD(bnet2, i, S.CPT);
+end
+
+% Evaluate the observed leaves of the HMM
+soft_ev = cell(1,T);
+for i=1:T
+ S = struct(bnet.CPD{onodes(i)}); % violate object privacy
+ dist = S.CPT(:, evidence{onodes(i)});
+ soft_ev{i} = dist;
+end
+
+% Use the leaf potentials as soft evidence
+engine2 = jtree_inf_engine(bnet2);
+[engine2, ll2] = enter_evidence(engine2, cell(1,T), 'soft', soft_ev);
+m2 = marginal_nodes(engine2, query);
+
+assert(approxeq(m2.T, m.T))
+assert(approxeq(ll2, ll))
+
+
+
+% marginal on node 1 without evidence
+[engine2, ll2] = enter_evidence(engine2, cell(1,T));
+m2 = marginal_nodes(engine2, 1);
+
+% add soft evidence
+soft_ev=cell(1,T);
+soft_ev{1}=[0.7 0.3];
+[engine2, ll2] = enter_evidence(engine2, cell(1,T), 'soft', soft_ev);
+m3 = marginal_nodes(engine2, 1);
+
+assert(approxeq(normalise(m2.T .* [0.7 0.3]'), m3.T))
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/softmax1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/softmax1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,109 @@
+% Check that softmax works with a simple classification demo.
+% Based on netlab's demglm2
+% X -> Q where X is an input node, and Q is a softmax
+
+rand('state', 0);
+randn('state', 0);
+
+% Check inference
+
+input_dim = 2;
+num_classes = 3;
+IRLS_iter = 3;
+
+net = glm(input_dim, num_classes, 'softmax');
+
+dag = zeros(2);
+dag(1,2) = 1;
+discrete_nodes = [2];
+bnet = mk_bnet(dag, [input_dim num_classes], 'discrete', discrete_nodes, 'observed', 1);
+bnet.CPD{1} = root_CPD(bnet, 1);
+clamped = 0;
+bnet.CPD{2} = softmax_CPD(bnet, 2, net.w1, net.b1, clamped, IRLS_iter);
+
+engine = jtree_inf_engine(bnet);
+
+x = rand(1, input_dim);
+q = glmfwd(net, x);
+
+[engine, ll] = enter_evidence(engine, {x, []});
+m = marginal_nodes(engine, 2);
+assert(approxeq(m.T(:), q(:)));
+
+
+% Check learning
+% We use EM, but in fact there is no hidden data.
+% The M step will call IRLS on the softmax node.
+
+% Generate data from three classes in 2d
+input_dim = 2;
+num_classes = 3;
+
+% Fix seeds for reproducible results
+randn('state', 42);
+rand('state', 42);
+
+ndata = 10;
+% Generate mixture of three Gaussians in two dimensional space
+data = randn(ndata, input_dim);
+targets = zeros(ndata, 3);
+
+% Priors for the clusters
+prior(1) = 0.4;
+prior(2) = 0.3;
+prior(3) = 0.3;
+
+% Cluster centres
+c = [2.0, 2.0; 0.0, 0.0; 1, -1];
+
+ndata1 = prior(1)*ndata;
+ndata2 = (prior(1) + prior(2))*ndata;
+% Put first cluster at (2, 2)
+data(1:ndata1, 1) = data(1:ndata1, 1) * 0.5 + c(1,1);
+data(1:ndata1, 2) = data(1:ndata1, 2) * 0.5 + c(1,2);
+targets(1:ndata1, 1) = 1;
+
+% Leave second cluster at (0,0)
+data((ndata1 + 1):ndata2, :) = ...
+ data((ndata1 + 1):ndata2, :);
+targets((ndata1+1):ndata2, 2) = 1;
+
+data((ndata2+1):ndata, 1) = data((ndata2+1):ndata,1) *0.6 + c(3, 1);
+data((ndata2+1):ndata, 2) = data((ndata2+1):ndata,2) *0.6 + c(3, 2);
+targets((ndata2+1):ndata, 3) = 1;
+
+
+if 0
+ ndata = 1;
+ data = x;
+ targets = [1 0 0];
+end
+
+options = foptions;
+options(1) = -1; % verbose
+options(14) = IRLS_iter;
+[net2, options2] = glmtrain(net, options, data, targets);
+net2.ll = options2(8); % type 'help foptions' for details
+
+cases = cell(2, ndata);
+for l=1:ndata
+ q = find(targets(l,:)==1);
+ x = data(l,:);
+ cases{1,l} = x(:);
+ cases{2,l} = q;
+end
+
+max_iter = 2; % we have complete observability, so 1 iter is enough
+[bnet2, ll2] = learn_params_em(engine, cases, max_iter);
+
+w = get_field(bnet2.CPD{2},'weights');
+b = get_field(bnet2.CPD{2},'offset')';
+
+w
+net2.w1
+
+b
+net2.b1
+
+% assert(approxeq(net2.ll, ll2)); % glmtrain returns ll after final M step, learn_params before
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/examples/static/sprinkler1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/examples/static/sprinkler1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,112 @@
+% Lawn sprinker example from Russell and Norvig p454
+% For a picture, see http://www.cs.berkeley.edu/~murphyk/Bayes/usage.html#basics
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+false = 1; true = 2;
+ns = 2*ones(1,N); % binary nodes
+
+%bnet = mk_bnet(dag, ns);
+bnet = mk_bnet(dag, ns, 'names', {'cloudy','S','R','W'}, 'discrete', 1:4);
+names = bnet.names;
+%C = names{'cloudy'};
+bnet.CPD{C} = tabular_CPD(bnet, C, [0.5 0.5]);
+bnet.CPD{R} = tabular_CPD(bnet, R, [0.8 0.2 0.2 0.8]);
+bnet.CPD{S} = tabular_CPD(bnet, S, [0.5 0.9 0.5 0.1]);
+bnet.CPD{W} = tabular_CPD(bnet, W, [1 0.1 0.1 0.01 0 0.9 0.9 0.99]);
+
+
+CPD{C} = reshape([0.5 0.5], 2, 1);
+CPD{R} = reshape([0.8 0.2 0.2 0.8], 2, 2);
+CPD{S} = reshape([0.5 0.9 0.5 0.1], 2, 2);
+CPD{W} = reshape([1 0.1 0.1 0.01 0 0.9 0.9 0.99], 2, 2, 2);
+joint = zeros(2,2,2,2);
+for c=1:2
+ for r=1:2
+ for s=1:2
+ for w=1:2
+ joint(c,s,r,w) = CPD{C}(c) * CPD{S}(c,s) * CPD{R}(c,r) * ...
+ CPD{W}(s,r,w);
+ end
+ end
+ end
+end
+
+joint2 = repmat(reshape(CPD{C}, [2 1 1 1]), [1 2 2 2]) .* ...
+ repmat(reshape(CPD{S}, [2 2 1 1]), [1 1 2 2]) .* ...
+ repmat(reshape(CPD{R}, [2 1 2 1]), [1 2 1 2]) .* ...
+ repmat(reshape(CPD{W}, [1 2 2 2]), [2 1 1 1]);
+
+assert(approxeq(joint, joint2));
+
+
+engine = jtree_inf_engine(bnet);
+
+evidence = cell(1,N);
+evidence{W} = true;
+
+[engine, ll] = enter_evidence(engine, evidence);
+
+m = marginal_nodes(engine, S);
+p1 = m.T(true) % P(S=true|W=true) = 0.4298
+lik1 = exp(ll); % P(W=true) = 0.6471
+assert(approxeq(p1, 0.4298));
+assert(approxeq(lik1, 0.6471));
+
+pSandW = sumv(joint(:,true,:,true), [C R]); % P(S,W) = sum_cr P(CSRW)
+pW = sumv(joint(:,:,:,true), [C S R]);
+pSgivenW = pSandW / pW; % P(S=t|W=t) = P(S=t,W=t)/P(W=t)
+assert(approxeq(pW, lik1))
+assert(approxeq(pSgivenW, p1))
+
+
+m = marginal_nodes(engine, R);
+p2 = m.T(true) % P(R=true|W=true) = 0.7079
+
+pRandW = sumv(joint(:,:,true,true), [C S]); % P(R,W) = sum_cr P(CSRW)
+pRgivenW = pRandW / pW; % P(R=t|W=t) = P(R=t,W=t)/P(W=t)
+assert(approxeq(pRgivenW, p2))
+
+
+% Add extra evidence that R=true
+evidence{R} = true;
+
+[engine, ll] = enter_evidence(engine, evidence);
+
+m = marginal_nodes(engine, S);
+p3 = m.T(true) % P(S=true|W=true,R=true) = 0.1945
+assert(approxeq(p3, 0.1945))
+
+
+pSandRandW = sumv(joint(:,true,true,true), [C]); % P(S,R,W) = sum_c P(cSRW)
+pRandW = sumv(joint(:,:,true,true), [C S]); % P(R,W) = sum_cs P(cSRW)
+pSgivenWR = pSandRandW / pRandW; % P(S=t|W=t,R=t) = P(S=t,R=t,W=t)/P(W=t,R=t)
+assert(approxeq(pSgivenWR, p3))
+
+% So the sprinkler is less likely to be on if we know that
+% it is raining, since the rain can "explain away" the fact
+% that the grass is wet.
+
+lik3 = exp(ll); % P(W=true, R=true) = 0.4581
+% So the combined evidence is less likely (of course)
+
+
+
+
+% Joint distributions
+
+evidence = cell(1,N);
+[engine, ll] = enter_evidence(engine, evidence);
+m = marginal_nodes(engine, [S R W]);
+
+evidence{R} = 2;
+[engine, ll] = enter_evidence(engine, evidence);
+m = marginal_nodes(engine, [S R W]);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+/add_ev_to_dmarginal.m/1.1.1.1/Thu Jun 27 20:34:32 2002//
+/add_evidence_to_gmarginal.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/bnet_to_fgraph.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/compute_fwd_interface.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/compute_interface_nodes.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/compute_joint_pot.m/1.1.1.1/Mon Jun 7 15:50:34 2004//
+/compute_minimal_interface.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/convert_dbn_CPDs_to_pots.m/1.1.1.1/Fri Nov 22 22:35:00 2002//
+/convert_dbn_CPDs_to_tables.m/1.1.1.1/Thu Jan 23 18:44:50 2003//
+/convert_dbn_CPDs_to_tables1.m/1.1.1.1/Thu Jan 23 18:49:48 2003//
+/convert_dbn_CPDs_to_tables_slow.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/dbn_to_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/dbn_to_hmm.m/1.1.1.1/Sun Feb 2 00:23:38 2003//
+/determine_elim_constraints.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/dispcpt.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/do_intervention.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/dsep.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/dsep_test.m/1.1.1.1/Sat Jan 18 23:10:16 2003//
+/enumerate_scenarios.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/fgraph_to_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/hodbn_to_bnet.m/1.1.1.1/Wed Jul 24 14:48:06 2002//
+/is_mnet.m/1.1.1.1/Sun Jun 16 20:01:22 2002//
+/linear_gaussian_to_cpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/log_lik_complete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/log_marg_lik_complete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_bnet.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_dbn.m/1.1.1.1/Sat Feb 1 19:42:14 2003//
+/mk_fgraph.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_fgraph_given_ev.m/1.1.1.1/Mon Jun 24 18:56:26 2002//
+/mk_higher_order_dbn.m/1.1.1.1/Tue Jul 23 13:17:04 2002//
+/mk_limid.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_mnet.m/1.1.1.1/Sun Jun 16 19:52:12 2002//
+/mk_mrf2.m/1.1.1.1/Tue Dec 31 22:06:48 2002//
+/mk_mutilated_samples.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_named_CPT.m/1.1.1.1/Tue Mar 30 17:18:54 2004//
+/mk_slice_and_half_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/noisyORtoTable.m/1.1.1.1/Mon Aug 2 22:05:58 2004//
+/partition_dbn_nodes.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/partition_matrix_vec_3.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/sample_bnet.m/1.1.1.1/Thu Jun 10 01:17:46 2004//
+/sample_bnet_nocell.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/sample_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/score_bnet_complete.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/shrink_obs_dims_in_gaussian.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/shrink_obs_dims_in_table.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/solve_limid.m/1.1.1.1/Mon Jun 7 15:48:02 2004//
+/unroll_dbn_topology.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/unroll_higher_order_topology.m/1.1.1.1/Fri May 31 10:25:58 2002//
+/unroll_set.m/1.1.1.1/Mon Dec 16 17:57:14 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/general
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+/bnet_to_gdl_graph.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/calc_mpe.m/1.1.1.1/Mon Jun 17 21:58:38 2002//
+/calc_mpe_bucket.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/calc_mpe_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/calc_mpe_given_inf_engine.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/calc_mpe_global.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/compute_interface_nodes.m/1.1.1.1/Wed May 29 15:59:54 2002//
+/mk_gdl_graph.m/1.1.1.1/Wed May 29 15:59:54 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/general/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/Old/bnet_to_gdl_graph.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/Old/bnet_to_gdl_graph.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function gdl = bnet_to_gdl_graph(bnet)
+% BNET_TO_GDL_GRAPH Convert a Bayesian network to a GDL graph
+% gdl = bnet_to_gdl_graph(bnet)
+%
+% Each node in the BN gets converted to a single node in the GDL graph,
+% representing its family; its kernel function is the corresponding CPD.
+
+N = length(bnet.dag);
+doms = cell(1,N);
+for i=1:N
+ doms{i} = family(bnet.dag, i);
+end
+
+U = mk_undirected(bnet.dag);
+gdl = mk_gdl_graph(U, doms, bnet.node_sizes, bnet.CPD, 'equiv_class', bnet.equiv_class, ...
+ 'discrete', bnet.dnodes, 'chance', bnet.chance_nodes, ...
+ 'decision', bnet.decision_nodes, 'utility', bnet.utility_nodes);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/Old/calc_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/Old/calc_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+function [mpe, ll] = calc_mpe(engine, evidence, break_ties)
+% CALC_MPE Computes the most probable explanation of the evidence
+% [mpe, ll] = calc_mpe_given_inf_engine(engine, evidence, break_ties)
+%
+% INPUT
+% engine must support max-propagation
+% evidence{i} is the observed value of node i, or [] if hidden
+% break_ties is optional. If 1, we will force ties to be broken consistently
+% by calling enter_evidence N times.
+%
+% OUTPUT
+% mpe{i} is the most likely value of node i (cell array!)
+% ll is the log-likelihood of the globally best assignment
+%
+% This currently only works when all hidden nodes are discrete
+
+if nargin < 3, break_ties = 0; end
+
+
+[engine, ll] = enter_evidence(engine, evidence, 'maximize', 1);
+
+observed = ~isemptycell(evidence);
+
+if 0 % fgraphs don't support bnet_from_engine
+onodes = find(observed);
+bnet = bnet_from_engine(engine);
+pot_type = determine_pot_type(bnet, onodes);
+assert(pot_type == 'd');
+end
+
+scalar = 1;
+evidence = evidence(:); % hack to handle unrolled DBNs
+N = length(evidence);
+mpe = cell(1,N);
+for i=1:N
+ m = marginal_nodes(engine, i);
+ % observed nodes are all set to 1 inside the inference engine, so we must undo this
+ if observed(i)
+ mpe{i} = evidence{i};
+ else
+ mpe{i} = argmax(m.T);
+ % Bug fix by Ron Zohar, 8/15/01
+ % If there are ties, we must break them as follows (see Jensen96, p106)
+ if break_ties
+ evidence{i} = mpe{i};
+ [engine, ll] = enter_evidence(engine, evidence, 'maximize', 1);
+ end
+ end
+ if length(mpe{i}) > 1, scalar = 0; end
+end
+
+if nargout >= 2
+ bnet = bnet_from_engine(engine);
+ ll = log_lik_complete(bnet, mpe(:));
+end
+if 0 % scalar
+ mpe = cell2num(mpe);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/Old/calc_mpe_bucket.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/Old/calc_mpe_bucket.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,160 @@
+function [mpe, ll] = calc_mpe_bucket(bnet, new_evidence, max_over)
+%
+% PURPOSE:
+% CALC_MPE Computes the most probable explanation to the network nodes
+% given the evidence.
+%
+% [mpe, ll] = calc_mpe(engine, new_evidence, max_over)
+%
+% INPUT:
+% bnet - the bayesian network
+% new_evidence - optional, if specified - evidence to be incorporated [cell(1,n)]
+% max_over - optional, if specified determines the variable elimination order [1:n]
+%
+% OUTPUT:
+% mpe - the MPE assignmet for the net variables (or [] if no satisfying assignment)
+% ll - log assignment probability.
+%
+% Notes:
+% 1. Adapted from '@var_elim_inf_engine\marginal_nodes' for MPE by Ron Zohar, 8/7/01
+% 2. Only discrete potentials are supported at this time.
+% 3. Complexity: O(nw*) where n is the number of nodes and w* is the induced tree width.
+% 4. Implementation based on:
+% - R. Dechter, "Bucket Elimination: A Unifying Framework for Probabilistic Inference",
+% UA1 96, pp. 211-219.
+
+
+ns = bnet.node_sizes;
+n = length(bnet.dag);
+evidence = cell(1,n);
+if (nargin<2)
+ new_evidence = evidence;
+end
+
+onodes = find(~isemptycell(new_evidence)); % observed nodes
+hnodes = find(isemptycell(new_evidence)); % hidden nodes
+pot_type = determine_pot_type(bnet, onodes);
+
+if pot_type ~= 'd'
+ error('only disrete potentials supported at this time')
+end
+
+for i=1:n
+ fam = family(bnet.dag, i);
+ CPT{i} = convert_to_pot(bnet.CPD{bnet.equiv_class(i)}, pot_type, fam(:), evidence);
+end
+
+% handle observed nodes: set impossible cases' probability to zero
+% rather than prun matrix (this makes backtracking easier)
+
+for ii=onodes
+ lIdx = 1:ns(ii);
+ lIdx = setdiff(lIdx, new_evidence{ii});
+
+ sCPT=struct(CPT{ii}); % violate object privacy
+
+ sargs = '';
+ for jj=1:(length(sCPT.domain)-1)
+ sargs = [sargs, ':,'];
+ end
+ for jj=lIdx
+ eval(['sCPT.T(', sargs, num2str(jj), ')=0;']);
+ end
+ CPT{ii}=dpot(sCPT.domain, sCPT.sizes, sCPT.T);
+end
+
+B = cell(1,n);
+for b=1:n
+ B{b} = mk_initial_pot(pot_type, [], [], [], []);
+end
+
+if (nargin<3)
+ max_over = (1:n);
+end
+order = max_over; % no attempt to optimize this
+
+
+% Initialize the buckets with the CPDs assigned to them
+for i=1:n
+ b = bucket_num(domain_pot(CPT{i}), order);
+ B{b} = multiply_pots(B{b}, CPT{i});
+end
+
+% Do backward phase
+max_over = max_over(length(max_over):-1:1); % reverse
+for i=max_over(1:end-1)
+ % max-ing over variable i which occurs in bucket j
+ j = bucket_num(i, order);
+ rest = mysetdiff(domain_pot(B{j}), i);
+ %temp = marginalize_pot_max(B{j}, rest);
+ temp = marginalize_pot(B{j}, rest, 1);
+ b = bucket_num(domain_pot(temp), order);
+ % fprintf('maxing over bucket %d (var %d), putting result into bucket %d\n', j, i, b);
+ sB=struct(B{b}); % violate object privacy
+ if ~isempty(sB.domain)
+ B{b} = multiply_pots(B{b}, temp);
+ else
+ B{b} = temp;
+ end
+end
+result = B{1};
+marginal = pot_to_marginal(result);
+[prob, mpe] = max(marginal.T);
+
+% handle impossible cases
+if ~(prob>0)
+ mpe = [];
+ ll = -inf;
+ %warning('evidence has zero probability')
+ return
+end
+
+ll = log(prob);
+
+% Do forward phase
+for ii=2:n
+ marginal = pot_to_marginal(B{ii});
+ mpeidx = [];
+ for jj=order(1:length(mpe))
+ assert(ismember(jj, marginal.domain)) %%% bug
+ temp = find_equiv_posns(jj, marginal.domain);
+ mpeidx = [mpeidx, temp] ;
+ if isempty(temp)
+ mpeidx = [mpeidx, Inf] ;
+ end
+ end
+ [mpeidxsorted sortedtompe] = sort(mpeidx) ;
+
+ % maximize the matrix obtained from assigning values from previous buckets.
+ % this is done by building a string and using eval.
+
+ kk=1;
+ sargs = '(';
+ for jj=1:length(marginal.domain)
+ if (jj~=1)
+ sargs = [sargs, ','];
+ end
+ if (mpeidxsorted(kk)==jj)
+ sargs = [sargs, num2str(mpe(sortedtompe(kk)))];
+ if (kk= 5
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'equiv_class', bnet.equiv_class = args{i+1};
+ case 'chance', bnet.chance_nodes = args{i+1};
+ case 'utility', bnet.utility_nodes = args{i+1};
+ case 'decision', bnet.decision_nodes = args{i+1};
+ case 'discrete', bnet.dnodes = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+
+gdl.G = G;
+gdl.vars = vars;
+gdl.doms = domains;
+gdl.node_sizes = node_sizes;
+gdl.cnodes = mysetdiff(vars, gdl.dnodes);
+gdl.kernels = kernels;
+gdl.type = 'gdl';
+
+% Compute a bit vector representation of the set of domains
+% dom_bitv(i,j) = 1 iff variable j occurs in domain i
+gdl.dom_bitv = zeros(N, length(vars));
+for i=1:N
+ gdl.dom_bitv(i, domains{i}) = 1;
+end
+
+% compute the interesection of the domains on either side of each edge (separating set)
+gdl.sepset = cell(N, N);
+gdl.nbrs = cell(1,N);
+for i=1:N
+ nbrs = neighbors(G, i);
+ gdl.nbrs{i} = nbrs;
+ for j = nbrs(:)'
+ gdl.sepset{i,j} = myintersect(domains{i}, domains{j});
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/add_ev_to_dmarginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/add_ev_to_dmarginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function fmarginal = add_ev_to_dmarginal(fmarginal, evidence, ns)
+% ADD_EV_TO_DMARGINAL 'pump up' observed nodes back to their original size.
+% fmarginal = add_ev_to_dmarginal(fmarginal, evidence, ns)
+%
+% We introduce 0s into the array in positions which are incompatible with the evidence.
+
+dom = fmarginal.domain;
+odom = dom(~isemptycell(evidence(dom)));
+vals = cat(1, evidence{odom});
+index = mk_multi_index(length(dom), find_equiv_posns(odom, dom), vals);
+T = 0*myones(ns(dom));
+ens = ns(:)';
+ens(odom) = 1;
+T(index{:}) = myreshape(fmarginal.T, ens(dom));
+fmarginal.T = T;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/add_evidence_to_gmarginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/add_evidence_to_gmarginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,78 @@
+function fullm = add_evidence_to_gmarginal(fmarginal, evidence, ns, cnodes)
+% ADD_EVIDENCE_TO_GMARGINAL 'pump up' observed nodes back to their original size.
+% function fullm = add_evidence_to_gmarginal(fmarginal, evidence, ns, cnodes)
+%
+% We introduce 0s into the array in positions which are incompatible with the evidence.
+% for both discrete and continuous nodes.
+%
+% See also add_ev_to_dmarginal
+
+dom = fmarginal.domain;
+fullm.domain = fmarginal.domain;
+
+% Find out which values of the discrete parents (if any) are compatible with
+% the discrete evidence (if any).
+dnodes = mysetdiff(1:length(ns), cnodes);
+ddom = myintersect(dom, dnodes);
+cdom = myintersect(dom, cnodes);
+odom = dom(~isemptycell(evidence(dom)));
+hdom = dom(isemptycell(evidence(dom)));
+
+% Find the entries in the big table that are compatible with the discrete evidence.
+% (We will put the probabilities from the small inferred table into these positions.)
+% We could use add_ev_to_dmarginal to do this.
+dobs = myintersect(ddom, odom);
+dvals = cat(1, evidence{dobs});
+ens = ns; % effective node sizes
+ens(dobs) = 1;
+S = prod(ens(ddom));
+subs = ind2subv(ens(ddom), 1:S);
+mask = find_equiv_posns(dobs, ddom);
+%subs(mask) = dvals; % bug fix by P. Brutti
+for i=1:length(mask),
+ subs(:,mask(i)) = dvals(i);
+end
+supportedQs = subv2ind(ns(ddom), subs);
+
+if isempty(ddom)
+ Qarity = 1;
+else
+ Qarity = prod(ns(ddom));
+end
+fullm.T = zeros(Qarity, 1);
+fullm.T(supportedQs) = fmarginal.T(:);
+fullm.T = myreshape(fullm.T, ns(ddom));
+
+
+if isempty(cdom)
+ fullm.mu = [];
+ fullm.sigma = [];
+ return;
+end
+
+% Now put the hidden cts parts into their right blocks,
+% leaving the observed cts parts as 0.
+cobs = myintersect(cdom, odom);
+chid = myintersect(cdom, hdom);
+cvals = cat(1, evidence{cobs});
+n = sum(ns(cdom));
+fullm.mu = zeros(n,Qarity);
+fullm.Sigma = zeros(n,n,Qarity);
+
+if ~isempty(chid)
+ chid_blocks = block(find_equiv_posns(chid, cdom), ns(cdom));
+end
+if ~isempty(cobs)
+ cobs_blocks = block(find_equiv_posns(cobs, cdom), ns(cdom));
+end
+
+for i=1:length(supportedQs)
+ Q = supportedQs(i);
+ if ~isempty(chid)
+ fullm.mu(chid_blocks, Q) = fmarginal.mu(:, i);
+ fullm.Sigma(chid_blocks, chid_blocks, Q) = fmarginal.Sigma(:,:,i);
+ end
+ if ~isempty(cobs)
+ fullm.mu(cobs_blocks, Q) = cvals(:);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/bnet_to_fgraph.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/bnet_to_fgraph.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function fg = bnet_to_fgraph(bnet)
+% BNET_TO_FGRAPH Convert a Bayes net to a factor graph
+% fg = bnet_to_fgraph(bnet)
+%
+% We create one factor per family, whose kernel is the CPD
+
+nnodes = length(bnet.dag);
+G = zeros(nnodes, nnodes);
+for i=1:nnodes
+ G(family(bnet.dag, i), i) = 1;
+end
+
+fg = mk_fgraph(G, bnet.node_sizes, bnet.CPD, 'equiv_class', bnet.equiv_class, 'discrete', bnet.dnodes);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/compute_fwd_interface.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/compute_fwd_interface.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function int = compute_fwd_interface(intra, inter)
+% COMPUTE_FWD_INTERFACE Compute nodes with children in the next slice
+% function int = compute_fwd_interface(intra, inter)
+
+int = [];
+ss = length(intra);
+for u=1:ss
+ if any(inter(u,:))
+ int = [int u];
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/compute_interface_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/compute_interface_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+function [interface, persist, transient] = compute_interface_nodes(intra, inter)
+% COMPUTE_INTERFACE_NODES Find the nodes in a DBN that represent a sufficient statistic
+% [interface, persist, transient] = compute_interface_nodes(intra, inter)
+%
+% The interface nodes are all those that has an incoming temporal arc,
+% or which are parents of such nodes.
+% If the parents are in the previous slice, this just means they have an
+% outgoing temporal arc.
+% (The parents of nodes with incoming temporal arcs are needed
+% because moralization will bring them into the clique.)
+%
+% The persisent nodes are all those that have one or more incoming temporal arc.
+% The transient nodes are all the non-persistent.
+%
+% See U. Kjaerulff, "dHugin: A computational system for dynamic
+% time-sliced Bayesian networks", Intl. J. Forecasting (11) 89-111, 1995
+
+n = length(intra);
+interface = [];
+persist = [];
+% any nodes with incoming arcs
+for u=1:n
+ if any(inter(:,u))
+ interface = [interface u];
+ persist = [persist u];
+ end
+end
+% Any nodes which are parents of nodes with incoming arcs
+for u=1:n
+ cs = children(intra, u);
+ if any(inter(:, cs))
+ interface = [interface u];
+ end
+ %cs = children(inter, u);
+ % if ~isempty(myintersect(cs, persist))
+ % interface = [interface u];
+ %end
+end
+interface = unique(interface);
+persist = unique(persist);
+transient = mysetdiff(1:n, persist);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/compute_joint_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/compute_joint_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function [jpot, loglik] = compute_joint_pot(bnet, nodes, evidence, domain)
+% COMPUTE_JOINT_POT Compute the global joint potential of a Bayes net
+% function jpot = compute_joint_pot(bnet, nodes, evidence, domain)
+
+if nargin < 4, domain = nodes; end
+
+onodes = find(~isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes, domain);
+
+jpot = mk_initial_pot(pot_type, domain, bnet.node_sizes, bnet.cnodes, onodes);
+for i=nodes(:)'
+ e = bnet.equiv_class(i);
+ fam = family(bnet.dag, i);
+ pot = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ jpot = multiply_by_pot(jpot, pot);
+end
+%[jpot, loglik] = normalize_pot(jpot); % causes errors in asia_dt1 etc
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/compute_minimal_interface.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/compute_minimal_interface.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function clqs = compute_minimal_interface(intra, inter)
+
+int = compute_fwd_interface(intra, inter);
+ss = length(intra);
+Z = zeros(ss);
+dag = [intra inter;
+ Z intra];
+G = moralize(dag);
+intra2 = G(1:ss,1:ss);
+inter2 = G(1:ss,(1:ss)+ss);
+G = unroll_dbn_topology(intra2, inter2, ss);
+T = ss;
+last_slice = (1:ss) + (T-1)*ss;
+G = (G + G')/2; % mk symmetric
+G2 = (expm(full(G)) > 0); % closure of graph
+G3 = G2(last_slice, last_slice);
+[c,v] = scc(G3); % connected components
+ncomp = size(v,1);
+clqs = cell(1,ncomp);
+for i=1:ncomp
+ ndx = find(v(i,:)>0);
+ clqs{i} = v(i,ndx);
+ clqs{i} = myintersect(clqs{i}, int);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type, softCPDpot)
+% CONVERT_DBN_CPDS_TO_POTS Convert CPDs of (possibly instantiated) DBN nodes to potentials
+% CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type, softCPDpot)
+%
+% CPDpot{n,t} is a potential containing P(n,t|pa(n,t), ev)
+% softCPDpot{n,t} is a potential containing P(n,t|pa(n,t), ev) insted of using n's CPD
+
+[ss T] = size(evidence);
+
+if nargin < 4, softCPDpot = cell(ss,T); end
+CPDpot = softCPDpot;
+
+% Convert CPDs of instantiated nodes to potential form
+t = 1;
+for n=1:ss
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n, 1);
+ if isempty(softCPDpot{n,t})
+ CPDpot{n,t} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence(:,1));
+ end
+end
+for n=1:ss
+ fam = family(bnet.dag, n, 2);
+ e = bnet.equiv_class(n, 2);
+ for t=2:T
+ if isempty(softCPDpot{n,t})
+ CPDpot{n,t} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence(:,t-1:t));
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_tables.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_tables.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,201 @@
+function CPDpot = convert_dbn_CPDs_to_tables(bnet, evidence)
+% CONVERT_DBN_CPDS_TO_TABLES Convert CPDs of (possibly instantiated) DBN nodes to tables
+% CPDpot = convert_dbn_CPDs_to_tables(bnet, evidence)
+%
+% CPDpot{n,t} is a table containing P(n,t|pa(n,t), ev)
+% All hidden nodes are assumed to be discrete.
+% We assume the observed nodes are the same in every slice.
+%
+% Evaluating the conditional likelihood of long evidence sequences can be very slow,
+% so we take pains to vectorize where possible.
+
+[ss T] = size(evidence);
+%obs_bitv = ~isemptycell(evidence(:));
+obs_bitv = zeros(1, 2*ss);
+obs_bitv(bnet.observed) = 1;
+obs_bitv(bnet.observed+ss) = 1;
+
+ns = bnet.node_sizes(:);
+CPDpot = cell(ss,T);
+
+for n=1:ss
+ % slice 1
+ t = 1;
+ ps = parents(bnet.dag, n);
+ e = bnet.equiv_class(n, 1);
+ if ~any(obs_bitv(ps))
+ CPDpot{n,t} = convert_CPD_to_table_hidden_ps(bnet.CPD{e}, evidence{n,t});
+ else
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps n], evidence(:,1));
+ end
+
+% special cases: c=child, p=parents, d=discrete, h=hidden, 1sl=1slice
+% if c=h=1 then c=d=1, since hidden nodes must be discrete
+% c=h c=d p=h p=d 1sl method
+% ---------------------------
+% 1 1 1 1 - replicate CPT
+% - 1 - 1 - evaluate CPT on evidence *
+% 0 1 1 1 1 dhmm
+% 0 0 1 1 1 ghmm
+% other loop
+%
+% * = any subset of the domain may be observed
+
+% Example where all of the special cases occur - a hierarchical HMM
+% where the top layer (G) and leaves (Y) are observed and
+% all nodes are discrete except Y.
+% (O turns on if Y is an outlier)
+
+% G ---------> G
+% | |
+% v v
+% S --------> S
+% | |
+% v v
+% Y Y
+% ^ ^
+% | |
+% O O
+
+% Evaluating P(yt|St,Ot) is the ghmm case
+% Evaluating P(St|S(t-1),gt) is the eval CPT case
+% Evaluating P(gt|g(t-1) is the eval CPT case (hdom = [])
+% Evaluating P(Ot) is the replicated CPT case
+
+% Cts parents (e.g., inputs) would require an additional special case for speed
+
+
+ % slices 2..T
+ [ss T] = size(evidence);
+ self = n+ss;
+ ps = parents(bnet.dag, self);
+ e = bnet.equiv_class(n, 2);
+
+ if 1
+ debug = 0;
+ hidden_child = ~obs_bitv(n);
+ discrete_child = myismember(n, bnet.dnodes);
+ hidden_ps = all(~obs_bitv(ps));
+ discrete_ps = mysubset(ps, bnet.dnodes);
+ parents_in_same_slice = all(ps > ss);
+
+ if hidden_child & discrete_child & hidden_ps & discrete_ps
+ CPDpot = helper_repl(bnet, evidence, n, CPDpot, obs_bitv, debug);
+ elseif discrete_child & discrete_ps
+ CPDpot = helper_eval(bnet, evidence, n, CPDpot, obs_bitv, debug);
+ elseif discrete_child & hidden_ps & discrete_ps & parents_in_same_slice
+ CPDpot = helper_dhmm(bnet, evidence, n, CPDpot, obs_bitv, debug);
+ elseif ~discrete_child & hidden_ps & discrete_ps & parents_in_same_slice
+ CPDpot = helper_ghmm(bnet, evidence, n, CPDpot, obs_bitv, debug);
+ else
+ if debug, fprintf('node %d, slow\n', n); end
+ for t=2:T
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps self], evidence(:,t-1:t));
+ end
+ end
+ end
+
+ if 0
+ for t=2:T
+ CPDpot2{n,t} = convert_to_table(bnet.CPD{e}, [ps self], evidence(:,t-1:t));
+ if ~approxeq(CPDpot{n,t}, CPDpot2{n,t})
+ fprintf('CPDpot n=%d, t=%d\n',n,t);
+ keyboard
+ end
+ end
+ end
+
+
+end
+
+
+
+
+%%%%%%%
+function CPDpot = helper_repl(bnet, evidence, n, CPDpot, obs_bitv, debug)
+
+[ss T] = size(evidence);
+if debug, fprintf('node %d, repl\n', n); end
+e = bnet.equiv_class(n, 2);
+CPT = convert_CPD_to_table_hidden_ps(bnet.CPD{e}, []);
+CPDpot(n,2:T) = num2cell(repmat(CPT, [1 1 T-1]), [1 2]);
+
+
+
+%%%%%%%
+function CPDpot = helper_eval(bnet, evidence, n, CPDpot, obs_bitv, debug)
+
+[ss T] = size(evidence);
+self = n+ss;
+ps = parents(bnet.dag, self);
+e = bnet.equiv_class(n, 2);
+ns = bnet.node_sizes(:);
+% Example: given CPT(p1, p2, p3, p4, c), where p1,p3 are observed
+% we create CPT([p2 p4 c], [p1 p3]).
+% We then convert all observed p1,p3 into indices ndx
+% and return CPT(:, ndx)
+CPT = CPD_to_CPT(bnet.CPD{e});
+domain = [ps self];
+% if dom is [3 7 8] and 3,8 are observed, odom_rel = [1 3], hdom_rel = 2,
+% odom = [3 8], hdom = 7
+odom_rel = find(obs_bitv(domain));
+hdom_rel = find(~obs_bitv(domain));
+odom = domain(odom_rel);
+hdom = domain(hdom_rel);
+if isempty(hdom)
+ CPT = CPT(:);
+else
+ CPT = permute(CPT, [hdom_rel odom_rel]);
+ CPT = reshape(CPT, prod(ns(hdom)), prod(ns(odom)));
+end
+parents_in_same_slice = all(ps > ss);
+if parents_in_same_slice
+ if debug, fprintf('node %d eval 1 slice\n', n); end
+ data = cell2num(evidence(odom-ss,2:T)); %data(i,t) = val of i'th obs parent at t+1
+else
+ if debug, fprintf('node %d eval 2 slice\n', n); end
+ % there's probably a way of vectorizing this...
+ data = zeros(length(odom), T-1);
+ for t=2:T
+ ev = evidence(:,t-1:t);
+ ev = ev(:);
+ ev2 = ev(odom);
+ data(:,t-1) = cat(1, ev2{:});
+ %data(:,t-1) = cell2num(ev2);
+ end
+end
+ndx = subv2ind(ns(odom), data'); % ndx(t) encodes data(:,t)
+if isempty(hdom)
+ CPDpot(n,2:T) = num2cell(CPT(ndx)); % a cell array of floats
+else
+ CPDpot(n,2:T) = num2cell(CPT(:, ndx), 1); % a cell array of column vectors
+end
+
+%%%%%%%
+function CPDpot = helper_dhmm(bnet, evidence, n, CPDpot, obs_bitv, debug)
+
+if debug, fprintf('node %d, dhmm\n', n); end
+[ss T] = size(evidence);
+self = n+ss;
+ps = parents(bnet.dag, self);
+e = bnet.equiv_class(n, 2);
+ns = bnet.node_sizes(:);
+CPT = CPD_to_CPT(bnet.CPD{e});
+CPT = reshape(CPT, [prod(ns(ps)) ns(self)]); % what if no parents?
+%obslik = mk_dhmm_obs_lik(cell2num(evidence(n,2:T)), CPT);
+obslik = eval_pdf_cond_multinomial(cell2num(evidence(n,2:T)), CPT);
+CPDpot(n,2:T) = num2cell(obslik, 1);
+
+
+%%%%%%%
+function CPDpot = helper_ghmm(bnet, evidence, n, CPDpot, obs_bitv, debug)
+
+if debug, fprintf('node %d, ghmm\n', n); end
+[ss T] = size(evidence);
+e = bnet.equiv_class(n, 2);
+S = struct(bnet.CPD{e});
+ev2 = cell2num(evidence(n,2:T));
+%obslik = mk_ghmm_obs_lik(ev2, S.mean, S.cov);
+obslik = eval_pdf_cond_gauss(ev2, S.mean, S.cov);
+CPDpot(n,2:T) = num2cell(obslik, 1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_tables1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_tables1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,162 @@
+function CPDpot = convert_dbn_CPDs_to_tables1(bnet, evidence)
+% CONVERT_DBN_CPDS_TO_TABLES Convert CPDs of (possibly instantiated) DBN nodes to tables
+% CPDpot = convert_dbn_CPDs_to_tables(bnet, evidence)
+%
+% CPDpot{n,t} is a table containing P(n,t|pa(n,t), ev)
+% All hidden nodes are assumed to be discrete
+% We assume the observed nodes are the same in every slice
+%
+% Evaluating the conditional likelihood of the evidence can be very slow,
+% so we take pains to vectorize where possible, i.e., we try to avoid
+% calling convert_to_table
+
+[ss T] = size(evidence); % ss = slice size, T = num slices
+%obs_bitv = ~isemptycell(evidence(:));
+obs_bitv = zeros(1, 2*ss); % bit i set iff node i of the 2-slice window is observed
+obs_bitv(bnet.observed) = 1;
+obs_bitv(bnet.observed+ss) = 1; % observed nodes are the same in both slices
+
+ns = bnet.node_sizes(:);
+CPDpot = cell(ss,T);
+
+for n=1:ss
+ % slice 1
+ t = 1;
+ ps = parents(bnet.dag, n);
+ e = bnet.equiv_class(n, 1); % slice-1 equivalence class
+ if ~any(obs_bitv(ps))
+ CPDpot{n,t} = convert_CPD_to_table_hidden_ps(bnet.CPD{e}, evidence{n,t});
+ else
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps n], evidence(:,1)); % slow generic path
+ end
+
+ % slices 2..T
+ debug = 1; % NOTE(review): debug tracing is always on; set to 0 to silence
+ if ~obs_bitv(n)
+ CPDpot = helper_hidden_child(bnet, evidence, n, CPDpot, obs_bitv, debug);
+ else
+ CPDpot = helper_obs_child(bnet, evidence, n, CPDpot, obs_bitv, debug);
+ end
+end
+
+if 0
+CPDpot2 = convert_dbn_CPDs_to_tables_slow(bnet, evidence); % disabled cross-check against the slow reference implementation
+for t=1:T
+ for n=1:ss
+ if ~approxeq(CPDpot{n,t}, CPDpot2{n,t})
+ fprintf('CPDpot n=%d, t=%d\n',n,t);
+ keyboard
+ end
+ end
+end
+end
+
+
+% special cases: c=child, p=parents, d=discrete, h=hidden, 1=1slice
+% if c=h=1 then c=d=1, since hidden nodes must be discrete
+% c=h c=d p=h p=d p=1 method
+% ---------------------------
+% 1 1 1 1 - replicate CPT
+% 0 1 1 1 1 dhmm
+% 0 0 1 1 1 ghmm
+% - 1 - 1 - evaluate CPT on evidence
+% other loop
+
+%%%%%%%
+function CPDpot = helper_hidden_child(bnet, evidence, n, CPDpot, obs_bitv, debug)
+% Fill CPDpot(n,2:T) for a HIDDEN child node n; dispatches on whether its parents are hidden/observed and discrete/cts.
+[ss T] = size(evidence); % ss = slice size, T = num slices
+self = n+ss; % index of node n in slice 2 of the 2-slice template
+ps = parents(bnet.dag, self);
+e = bnet.equiv_class(n, 2); % slice-2 equivalence class holds the tied params
+ns = bnet.node_sizes(:);
+if ~any(obs_bitv(ps)) % all parents are hidden (hence discrete)
+ if debug, fprintf('node %d is hidden, all ps are hidden\n', n); end
+ if myismember(n, bnet.dnodes)
+ %CPT = CPD_to_CPT(bnet.CPD{e});
+ %CPT = reshape(CPT, [prod(ns(ps)) ns(self)]);
+ CPT = convert_CPD_to_table_hidden_ps(bnet.CPD{e}, []);
+ CPDpot(n,2:T) = num2cell(repmat(CPT, [1 1 T-1]), [1 2]); % same table replicated for every slice t=2..T
+ else
+ error(['hidden cts node disallowed'])
+ end
+else % some parents are observed - slow
+ if mysubset(ps, bnet.dnodes) % all parents are discrete
+ % given CPT(p1, p2, p3, p4, c), where p1,p3 are observed
+ % we create CPT([p2 p4 c], [p1 p3]).
+ % We then convert all observed p1,p3 into indices ndx
+ % and return CPT(:, ndx)
+ CPT = CPD_to_CPT(bnet.CPD{e});
+ domain = [ps self];
+ % if dom is [3 7 8] and 3,8 are observed, odom_rel = [1 3], hdom_rel = 2,
+ % odom = [3 8], hdom = 7
+ odom_rel = find(obs_bitv(domain)); % positions of observed members within domain
+ hdom_rel = find(~obs_bitv(domain)); % positions of hidden members within domain
+ odom = domain(odom_rel);
+ hdom = domain(hdom_rel);
+ CPT = permute(CPT, [hdom_rel odom_rel]); % move hidden dims first, observed last
+ CPT = reshape(CPT, prod(ns(hdom)), prod(ns(odom)));
+ parents_in_same_slice = all(ps > ss);
+ if parents_in_same_slice
+ if debug, fprintf('node %d is hidden, some ps are obs, all ps discrete, 1 slice\n', n); end
+ data = cell2num(evidence(odom-ss,2:T)); %data(i,t) = val of i'th obs parent at t+1
+ else
+ if debug, fprintf('node %d is hidden, some ps are obs, all ps discrete, 2 slice\n', n); end
+ data = zeros(length(odom), T-1);
+ for t=2:T
+ ev = evidence(:,t-1:t); % stacked 2-slice window; odom indexes into it directly
+ data(:,t-1) = cell2num(ev(odom));
+ end
+ end
+ ndx = subv2ind(ns(odom), data'); % ndx(t) encodes data(:,t)
+ CPDpot(n,2:T) = num2cell(CPT(:, ndx), [1 2]);
+ else % some parents are cts - v slow
+ if debug, fprintf('node %d is hidden, some ps are obs, some ps cts\n', n); end
+ for t=2:T
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps self], evidence(:,t-1:t));
+ end
+ end
+end
+
+%%%%%%%
+function CPDpot = helper_obs_child(bnet, evidence, n, CPDpot, obs_bitv, debug)
+% Fill CPDpot(n,2:T) for an OBSERVED child node n: vectorized dhmm/ghmm paths when all parents are hidden & 1-slice, generic slow path otherwise.
+[ss T] = size(evidence); % ss = slice size, T = num slices
+self = n+ss; % index of node n in slice 2 of the 2-slice template
+ps = parents(bnet.dag, self);
+e = bnet.equiv_class(n, 2); % slice-2 equivalence class holds the tied params
+ns = bnet.node_sizes(:);
+if ~any(obs_bitv(ps)) % all parents are hidden
+ parents_in_same_slice = all(ps > ss);
+ if parents_in_same_slice
+ if debug, fprintf('node %d is obs, all ps are hidden, 1 slice\n', n); end
+ ps1 = ps - ss; % parent indices within a single slice
+ if myismember(n, bnet.dnodes)
+ CPT = CPD_to_CPT(bnet.CPD{e});
+ CPT = reshape(CPT, [prod(ns(ps)) ns(self)]); % what if no parents?
+ obslik = eval_pdf_cond_multinomial(cell2num(evidence(n,2:T)), CPT);
+ CPDpot(n,2:T) = num2cell(obslik, 1); % one likelihood column per slice
+ else
+ S = struct(bnet.CPD{e}); % peek inside the CPD object for mean/cov
+ obslik = eval_pdf_cond_gauss(cell2num(evidence(n,2:T)), S.mean, S.cov);
+ CPDpot(n,2:T) = num2cell(obslik, 1);
+ end
+ else % parents span 2 slices - slow
+ if debug, fprintf('node %d is obs, all ps are hidden , 2 slice\n', n); end
+ for t=2:T
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps self], evidence(:,t-1:t));
+ end
+ end
+else
+ if isempty(ps) % observed root
+ if debug, fprintf('node %d is obs, no ps\n', n); end
+ CPT = CPD_to_CPT(bnet.CPD{e});
+ data = cell2num(evidence(n,2:T));
+ CPDpot(n,2:T) = num2cell(CPT(data)); % BUG FIX: wrap in num2cell; ()-assigning doubles into the cell array CPDpot errors
+ else % some parents are observed - slow
+ if debug, fprintf('node %d is obs, some ps are obs\n', n); end
+ for t=2:T
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps self], evidence(:,t-1:t));
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_tables_slow.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/convert_dbn_CPDs_to_tables_slow.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function CPDpot = convert_dbn_CPDs_to_tables_slow(bnet, evidence)
+% CONVERT_DBN_CPDS_TO_TABLES_SLOW Convert CPDs of (possibly instantiated) DBN nodes to tables
+% CPDpot = convert_dbn_CPDs_to_tables_slow(bnet, evidence)
+%
+% CPDpot{n,t} is a table containing P(n,t|pa(n,t), ev)
+% All hidden nodes are assumed to be discrete
+%
+% Non-vectorized method; this is less efficient for long sequences of observed Gaussian
+% nodes, because of the (unnecessary) repeated matrix inversion.
+
+obs_bitv = ~isemptycell(evidence(:)); % observedness inferred per-cell, unlike the fast version
+[ss T] = size(evidence); % ss = slice size, T = num slices
+ns = bnet.node_sizes(:);
+
+CPDpot = cell(ss,T);
+
+t = 1;
+for n=1:ss
+ %ps = engine.bnet_parents{n};
+ ps = parents(bnet.dag, n);
+ e = bnet.equiv_class(n, 1); % slice-1 equivalence class
+ if ~any(obs_bitv(ps))
+ CPDpot{n,t} = convert_CPD_to_table_hidden_ps(bnet.CPD{e}, evidence{n,t});
+ else
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps n], evidence(:,1));
+ end
+end
+for t=2:T
+ for n=1:ss
+ self = n+ss; % node n in slice 2 of the 2-slice template
+ ps = parents(bnet.dag, self);
+ e = bnet.equiv_class(n, 2); % slice-2 equivalence class holds tied params
+ if ~any(obs_bitv(ps))
+ CPDpot{n,t} = convert_CPD_to_table_hidden_ps(bnet.CPD{e}, evidence{n,t});
+ else
+ CPDpot{n,t} = convert_to_table(bnet.CPD{e}, [ps self], evidence(:,t-1:t));
+ end
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/dbn_to_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/dbn_to_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function bnet = dbn_to_bnet(dbn, T)
+% DBN_TO_BNET Convert a DBN to a static network by unrolling it for T slices
+% bnet = dbn_to_bnet(dbn, T)
+
+ss = length(dbn.intra); % slice size
+eclass = [dbn.equiv_class(:,1) repmat(dbn.equiv_class(:,2), 1, T-1)]; % slice 1 keeps its own eclass; slices 2..T share slice-2 params
+dnodes = unroll_set(dbn.dnodes_slice, ss, T);
+ns = repmat(dbn.node_sizes_slice(:), 1, T);
+dag = unroll_dbn_topology(dbn.intra, dbn.inter, T, dbn.intra1);
+onodes = unroll_set(dbn.observed(:), ss, T);
+bnet = mk_bnet(dag, ns(:), 'discrete', dnodes(:), 'equiv_class', eclass(:), 'observed', onodes(:));
+bnet.CPD = dbn.CPD; % CPDs are shared, not copied per slice
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/dbn_to_hmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/dbn_to_hmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,81 @@
+function [startprob, transprob, obsprob] = dbn_to_hmm(bnet)
+% DBN_TO_HMM % Convert DBN params to HMM params
+% [startprob, transprob, obsprob] = dbn_to_hmm(bnet, onodes)
+% startprob(i)
+% transprob(i,j)
+% obsprob{k}.big_CPT(i,o) if k'th observed node is discrete
+% obsprob{k}.big_mu(:,i), .big_Sigma(:,:,i) if k'th observed node is Gaussian
+% Big means the domain contains all the hidden discrete nodes, not just the parents.
+
+% Called by constructor and by update_engine
+
+ss = length(bnet.intra); % slice size
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+evidence = cell(ss, 2); % empty evidence: all nodes hidden while building joints
+ns = bnet.node_sizes(:);
+Qh = prod(ns(hnodes)); % size of the joint hidden state space
+tmp = dpot_to_table(compute_joint_pot(bnet, hnodes, evidence));
+startprob = reshape(tmp, Qh, 1);
+
+tmp = dpot_to_table(compute_joint_pot(bnet, hnodes+ss, evidence, [hnodes hnodes+ss]));
+transprob = mk_stochastic(reshape(tmp, Qh, Qh)); % renormalize rows to sum to 1
+
+% P(o|ps) is used by mk_hmm_obs_lik_vec for a single time slice
+% P(o|h) (the big version), where h = all hidden nodes, is used by enter_evidence
+
+obsprob = cell(1, length(onodes));
+for i=1:length(onodes)
+ o = onodes(i);
+ if bnet.auto_regressive(o)
+ % We assume the parents of this node are all the hidden nodes in the slice,
+ % so the params already are "big". Also, we assume we regress only on our old selves.
+ % slice 1
+ e = bnet.equiv_class(o);
+ CPD = struct(bnet.CPD{e}); % peek inside the CPD object
+ O = ns(o);
+ ps = bnet.parents{o};
+ Qps = prod(ns(ps));
+ obsprob{i}.big_mu0 = reshape(CPD.mean, [O Qps]);
+ obsprob{i}.big_Sigma0 = reshape(CPD.cov, [O O Qps]);
+
+ % slice t>1
+ e = bnet.equiv_class(o+ss);
+ CPD = struct(bnet.CPD{e});
+ O = ns(o);
+ dps = mysetdiff(bnet.parents{o+ss}, o); % discrete parents, excluding our own past self
+ Qdps = prod(ns(dps));
+ obsprob{i}.big_mu = reshape(CPD.mean, [O Qdps]);
+ obsprob{i}.big_Sigma = reshape(CPD.cov, [O O Qdps]);
+ obsprob{i}.big_W = reshape(CPD.weights, [O O Qdps]);
+ else
+ e = bnet.equiv_class(o+ss);
+ CPD = struct(bnet.CPD{e});
+ O = ns(o);
+ ps = bnet.parents{o};
+ Qps = prod(ns(ps));
+ % We make a big potential, replicating the params if necessary
+ % e.g., for a 2 chain coupled HMM, mu(:,Q1) becomes mu(:,Q1,Q2)
+ bigpot = pot_to_marginal(compute_joint_pot(bnet, onodes(i), evidence, [hnodes onodes(i)]));
+
+ if myismember(o, bnet.dnodes)
+ obsprob{i}.CPT = reshape(CPD.CPT, [Qps O]);
+ obsprob{i}.big_CPT = reshape(bigpot.T, Qh, O);
+ else
+ obsprob{i}.big_mu = bigpot.mu;
+ obsprob{i}.big_Sigma = bigpot.Sigma;
+
+ if 1
+ obsprob{i}.mu = reshape(CPD.mean, [O Qps]);
+ C = reshape(CPD.cov, [O O Qps]);
+ obsprob{i}.Sigma = C;
+ d = size(obsprob{i}.mu, 1);
+ for j=1:Qps
+ obsprob{i}.inv_Sigma(:,:,j) = inv(C(:,:,j)); % precompute per-state inverse ...
+ obsprob{i}.denom(j) = (2*pi)^(d/2)*sqrt(abs(det(C(:,:,j)))); % ... and Gaussian normalizing constant
+ end
+ end
+
+ end % if discrete
+ end % if ar
+end % for
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/determine_elim_constraints.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/determine_elim_constraints.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function partial_order = determine_elim_constraints(bnet, onodes)
+% DETERMINE_ELIM_CONSTRAINTS Determine what the constraints are (if any) on the elimination ordering.
+% partial_order = determine_elim_constraints(bnet, onodes)
+%
+% A graph with different kinds of nodes (e.g., discrete and cts, or decision and rnd) is called marked.
+% A strong root is guaranteed to exist if the marked graph is triangulated and does not have any paths of
+% the form discrete -> cts -> discrete. In general we need to add extra edges to
+% the moral graph to ensure this (see example in Lauritzen (1992) fig 3b).
+% However, a simpler sufficient condition is to eliminate all the cts nodes before the discrete ones,
+% because then, as we move from the leaves to the root, the cts nodes get marginalized away
+% and we are left with purely discrete cliques.
+%
+% partial_order(i,j)=1 if we must marginalize j *before* i
+% (so i will be nearer the strong root).
+% If the hidden nodes are either all discrete or all cts, we set partial_order = [].
+%
+% For details, see
+% - Jensen, Jensen and Dittmer, "From influence diagrams to junction trees", UAI 94.
+% - Lauritzen, "Propagation of probabilities, means, and variances in mixed graphical
+% association models", JASA 87(420):1098--1108, 1992.
+% - K. Olesen, "Causal probabilistic networks with both discrete and continuous variables",
+% IEEE Pami 15(3), 1993
+
+
+n = length(bnet.dag);
+pot_type = determine_pot_type(bnet, onodes);
+if (pot_type == 'd') | (pot_type == 'g') % purely discrete or purely Gaussian: no constraint needed
+ partial_order = [];
+ return;
+end
+
+
+partial_order = sparse(n,n);
+partial_order(bnet.dnodes, bnet.cnodes) = 1; % every cts node must be eliminated before every discrete node
+
+% Integrate out cts nodes before their discrete parents - see Olesen (1993) p9
+% This method gives the wrong results on cg1.m!
+if 0
+for i=bnet.cnodes(:)'
+ dps = myintersect(parents(bnet.dag, i), bnet.dnodes);
+ partial_order(dps, i)=1;
+end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/dispcpt.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/dispcpt.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function display_CPT(CPT) % NOTE(review): file is dispcpt.m but function is display_CPT; MATLAB dispatches on the filename -- confirm intended name
+% Pretty-print a CPT: one line per parent instantiation, listing P(child|parents).
+n = ndims(CPT); % last dimension is the child
+parents_size = size(CPT);
+parents_size = parents_size(1:end-1);
+child_size = size(CPT,n);
+c = 1; % NOTE(review): unused
+for i=1:prod(parents_size)
+ parent_inst = ind2subv(parents_size, i); % i'th joint parent assignment
+ fprintf(1, '%d ', parent_inst);
+ fprintf(1, ': ');
+ index = num2cell([parent_inst 1]);
+ index{n} = ':'; % select the whole child dimension for this parent instantiation
+ fprintf(1, '%6.4f ', CPT(index{:}));
+ fprintf(1, '\n');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/do_intervention.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/do_intervention.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function bnet = mutilate_bnet(bnet, nodes, vals)
+% MUTILATE_BNET Clamp nodes to specific values (perform a surgical intervention)
+% bnet = mutilate_bnet(bnet, nodes, vals)
+%
+% We make all the clamped nodes roots.
+
+ns = bnet.node_sizes;
+for i=1:length(nodes)
+ X = nodes(i);
+ x = vals(i);
+ bnet.dag(:,X) = 0; % cut all incoming arcs, making X a root
+ bnet.CPD{X} = root_CPD(bnet, X, x); % replace its CPD with a point mass at the clamped value
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/dsep.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/dsep.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function sep = dsep(X, Y, S, G)
+% DSEP Is X indep Y given S wrt DAG G?
+% sep = dsep(X, Y, S, G)
+%
+% Instead of using the Bayes-Ball criterion, we see if S separates X and Y
+% in the moralized ancestral graph.
+
+conn = reachability_graph(G);
+M = myunion(myunion(X, Y), S); % all nodes mentioned in the query
+[A,junk] = find(conn(:, M)); % ancestors of the query nodes
+A = unique(A);
+A = myunion(A, M); % ancestral set = query nodes plus their ancestors
+GM = moralize(G(A,A));
+%sep = graph_separated(GM, X, Y, S);
+sep = graph_separated(GM, find_equiv_posns(X,A), find_equiv_posns(Y,A), find_equiv_posns(S,A)); % remap node ids into the subgraph indexing
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/dsep_test.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/dsep_test.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+
+% Cowell et al p72
+% Script exercising dsep on the DAG from Cowell et al; prints three d-separation queries.
+G = zeros(10);
+G(1,2)=1;
+G(2,3)=1;
+G(3,7)=1;
+G(4,[5 8])=1;
+G(5,6)=1;
+G(6,7)=1;
+G(7,[9 10])=1;
+G(8,9)=1;
+
+dsep(1, 4, [5 7], G)
+dsep(1, 4, [7], G)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/enumerate_scenarios.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/enumerate_scenarios.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function [scenarios, log_probs] = enumerate_scenarios(bnet, evidence)
+% ENUMERATE_SCENARIOS Enumerate all assignments, and return the prob. of the non-zeros ones
+% function [scenarios, log_probs] = enumerate_scenarios(bnet, evidence)
+
+assert(isempty(bnet.cnodes)); % only valid for all-discrete networks
+n = length(bnet.dag);
+observed = ~isemptycell(evidence);
+vals = cat(1,evidence{observed});
+vals = vals(:)';
+ns = bnet.node_sizes;
+
+log_probs = [];
+scenarios = [];
+for i=1:prod(ns) % exhaustive: exponential in the number of nodes
+ inst = ind2subv(ns, i); % i'th instantiation
+ if isempty(vals) | inst(observed) == vals % agrees with evidence
+ ll = log_lik_complete(bnet, num2cell(inst(:)));
+ log_probs = [log_probs ll];
+ scenarios = [scenarios(:)' inst]; % NOTE(review): appends into one flat row vector; one row per scenario may have been intended -- confirm with callers
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/fgraph_to_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/fgraph_to_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function bnet = fgraph_to_bnet(fg)
+% FGRAPH_TO_BNET Convert a factor graph to a Bayes net
+% bnet = fgraph_to_bnet(fg)
+%
+% We assume all factors are tabular_CPD.
+% We create 1 dummy observed node for every factor.
+
+N = fg.nvars + fg.nfactors;
+vnodes = 1:fg.nvars; % variable nodes come first
+fnodes = fg.nvars+1:N; % then one dummy node per factor
+dag = zeros(N);
+for x=1:fg.nvars
+ dag(x, fnodes(fg.dep{x})) = 1; % each variable is a parent of the factors that mention it
+end
+ns = [fg.node_sizes ones(1, fg.nfactors)]; % dummy factor nodes are size 1
+discrete = [fg.dnodes fnodes];
+bnet = mk_bnet(dag, ns, 'discrete', discrete);
+for x=1:fg.nvars
+ bnet.CPD{x} = tabular_CPD(bnet, x, 'CPT', 'unif'); % uniform prior on each variable
+end
+ev = cell(1, fg.nvars); % no evidence
+for i=1:fg.nfactors
+ f = fnodes(i);
+ e = fg.equiv_class(i);
+ pot = convert_to_pot(fg.factors{e}, 'd', fg.dom{i}, ev);
+ m = pot_to_marginal(pot);
+ bnet.CPD{f} = tabular_CPD(bnet, f, 'CPT', m.T); % factor table becomes the dummy node's CPT
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/hodbn_to_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/hodbn_to_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function bnet = hodbn_to_bnet(dbn, T)
+% HODBN_TO_BNET Convert a higher-order DBN to a static network by unrolling it for T slices
+% bnet = hodbn_to_bnet(dbn, T)
+ss = length(dbn.intra); % slice size
+[row,order] = size(dbn.equiv_class); % order = Markov order of the model
+eclass = [];
+for i = 1:min(order,T)
+ eclass = [eclass ; dbn.equiv_class(:,i)]; % first min(order,T) slices each keep their own eclass
+end
+if T > order
+ eclass = [eclass ; repmat(dbn.equiv_class(:,order),T-order,1)]; % remaining slices share the order'th eclass
+end
+
+dnodes = unroll_set(dbn.dnodes_slice, ss, T);
+ns = repmat(dbn.node_sizes_slice(:), 1, T);
+dag = unroll_higher_order_topology(dbn.intra, dbn.inter, T, dbn.intra1);
+onodes = unroll_set(dbn.observed(:), ss, T);
+bnet = mk_bnet(dag, ns(:), 'discrete', dnodes(:), 'equiv_class', eclass(:), 'observed', onodes(:));
+bnet.CPD = dbn.CPD; % CPDs are shared, not copied per slice
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/is_mnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/is_mnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function m = is_mnet(model)
+% IS_MNET Return true iff the model struct is a Markov net (has a 'markov_net' field).
+m = isfield(model, 'markov_net');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/linear_gaussian_to_cpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/linear_gaussian_to_cpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function pot = linear_gaussian_to_cpot(mu, Sigma, W, domain, ns, cnodes, evidence)
+% LINEAR_GAUSSIAN_TO_CPOT Convert a linear Gaussian CPD to a canonical potential.
+% pot = linear_gaussian_to_cpot(mu, Sigma, W, domain, ns, cnodes, evidence)
+%
+% We include any cts evidence, but ignore any discrete evidence.
+% (Use gaussian_CPD_params_given_dps to use discrete evidence to select mu, Sigma, W.)
+
+odom = domain(~isemptycell(evidence(domain))); % observed members of the domain
+hdom = domain(isemptycell(evidence(domain))); % hidden members of the domain
+cobs = myintersect(cnodes, odom);
+chid = myintersect(cnodes, hdom);
+cvals = cat(1, evidence{cobs});
+
+%[g,h,K] = gaussian_to_canonical(mu, Sigma, W);
+Sinv = inv(Sigma);
+g = -0.5*mu'*Sinv*mu + log(normal_coef(Sigma)); % log normalizing constant of the canonical form
+if isempty(W) | (size(W,2)==0) % no cts parents
+ h = Sinv*mu;
+ K = Sinv;
+else
+ h = [-W'*Sinv*mu; Sinv*mu]; % parents block first, then self block
+ K = [W'*Sinv*W -W'*Sinv';
+ -Sinv*W Sinv];
+end
+
+if ~isempty(cvals)
+ %[g, h, K] = enter_evidence_canonical(g, h, K, chid, cobs, cvals(:), ns);
+ [hx, hy, KXX, KXY, KYX, KYY] = partition_matrix_vec(h, K, chid, cobs, ns); % X = hidden cts, Y = observed cts
+ y = cvals(:);
+ g = g + hy'*y - 0.5*y'*KYY*y; % absorb the observed block into the constant
+ if length(hx)==0 % isempty(X) % i.e., we have instantiated everything away
+ h = [];
+ K = [];
+ else
+ h = hx - KXY*y;
+ K = KXX;
+ end
+end
+
+ns(odom) = 0; % observed nodes now have zero effective dimension
+pot = cpot(domain, ns(domain), g, h, K);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/log_lik_complete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/log_lik_complete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function L = log_lik_complete(bnet, cases, clamped)
+% LOG_LIK_COMPLETE Compute sum_m sum_i log P(x(i,m)| x(pi_i,m), theta_i) for a completely observed data set
+% L = log_lik_complete(bnet, cases, clamped)
+%
+% If there is missing data, you must use an inference engine.
+% cases(i,m) is the value assigned to node i in case m.
+% (If there are vector-valued nodes, cases should be a cell array.)
+% clamped(i,m) = 1 if node i was set by intervention in case m (default: clamped = zeros)
+% Clamped nodes contribute a factor of 1.0 to the likelihood.
+
+if iscell(cases), usecell = 1; else usecell = 0; end
+
+n = length(bnet.dag);
+ncases = size(cases, 2);
+if n ~= size(cases, 1)
+ error('data should be of size nnodes * ncases');
+end
+
+if nargin < 3, clamped = zeros(n,ncases); end
+
+L = 0;
+for i=1:n
+ ps = parents(bnet.dag, i);
+ e = bnet.equiv_class(i);
+ u = find(clamped(i,:)==0); % only unclamped cases contribute to node i's likelihood
+ ll = log_prob_node(bnet.CPD{e}, cases(i,u), cases(ps,u));
+ if approxeq(exp(ll), 0), fprintf('node %d has very low likelihood\n', i); end % BUG FIX: pass i so %d is filled in
+ L = L + ll;
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/log_marg_lik_complete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/log_marg_lik_complete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function L = log_marg_lik_complete(bnet, cases, clamped)
+% LOG_MARG_LIK_COMPLETE Compute sum_m sum_i log P(x(i,m)| x(pi_i,m)) for a completely observed data set
+% L = log_marg_lik_complete(bnet, cases, clamped)
+%
+% This differs from log_lik_complete because we integrate out the parameters.
+% If there is a missing data, you must use an inference engine.
+% cases(i,m) is the value assigned to node i in case m.
+% (If there are vector-valued nodes, cases should be a cell array.)
+% clamped(i,m) = 1 if node i was set by intervention in case m (default: clamped = zeros)
+% Clamped nodes contribute a factor of 1.0 to the likelihood.
+%
+% If there is a single case, clamped is a list of the clamped nodes, not a bit vector.
+
+if iscell(cases), usecell = 1; else usecell = 0; end % NOTE(review): usecell is never used below
+
+n = length(bnet.dag);
+ncases = size(cases, 2);
+if n ~= size(cases, 1)
+ error('data should be of size nnodes * ncases');
+end
+
+if ncases == 1
+ if nargin < 3, clamped = []; end
+ clamp_set = clamped; % single-case convention: clamped is a node list; convert to a bit vector
+ clamped = zeros(n,1);
+ clamped(clamp_set) = 1;
+else
+ if nargin < 3, clamped = zeros(n,ncases); end
+end
+
+L = 0;
+for i=1:n
+ ps = parents(bnet.dag, i);
+ e = bnet.equiv_class(i);
+ u = find(clamped(i,:)==0); % only unclamped cases contribute to node i's score
+ L = L + log_marg_prob_node(bnet.CPD{e}, cases(i,u), cases(ps,u));
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,93 @@
+function bnet = mk_bnet(dag, node_sizes, varargin)
+% MK_BNET Make a Bayesian network.
+%
+% BNET = MK_BNET(DAG, NODE_SIZES, ...) makes a graphical model with an arc from i to j iff DAG(i,j) = 1.
+% Thus DAG is the adjacency matrix for a directed acyclic graph.
+% The nodes are assumed to be in topological order. Use TOPOLOGICAL_SORT if necessary.
+%
+% node_sizes(i) is the number of values node i can take on,
+% or the length of node i if i is a continuous-valued vector.
+% node_sizes(i) = 1 if i is a utility node.
+%
+% Below are the names of optional arguments [and their default value in brackets].
+% Pass as 'PropertyName1', PropertyValue1, 'PropertyName2', PropertyValue2, ...
+%
+% discrete - the list of nodes which are discrete random variables [1:N]
+% equiv_class - equiv_class(i)=j means node i gets its params from CPD{j} [1:N]
+% observed - the list of nodes which will definitely be observed in every case [ [] ]
+% 'names' - a cell array of strings to be associated with nodes 1:n [{}]
+% This creates an associative array, so you write e.g.
+% 'evidence(bnet.names{'bar'}) = 42' instead of 'evidence(2} = 42'
+% assuming names = { 'foo', 'bar', ...}.
+%
+% e.g., bnet = mk_bnet(dag, ns, 'discrete', [1 3])
+%
+% For backwards compatibility with BNT2, you can also specify the parameters in the following order
+% bnet = mk_bnet(dag, node_sizes, discrete_nodes, equiv_class)
+
+n = length(dag);
+
+% default values for parameters
+bnet.equiv_class = 1:n;
+bnet.dnodes = 1:n; % discrete
+bnet.observed = [];
+bnet.names = {};
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1}) % positional (BNT2-style) arguments
+ if nargs >= 1, bnet.dnodes = args{1}; end
+ if nargs >= 2, bnet.equiv_class = args{2}; end
+ else % name/value pairs
+ for i=1:2:nargs
+ switch args{i},
+ case 'equiv_class', bnet.equiv_class = args{i+1};
+ case 'discrete', bnet.dnodes = args{i+1};
+ case 'observed', bnet.observed = args{i+1};
+ case 'names', bnet.names = assocarray(args{i+1}, num2cell(1:n));
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ end
+end
+
+bnet.observed = sort(bnet.observed); % for comparing sets
+bnet.hidden = mysetdiff(1:n, bnet.observed(:)');
+bnet.hidden_bitv = zeros(1,n);
+bnet.hidden_bitv(bnet.hidden) = 1;
+bnet.dag = dag;
+bnet.node_sizes = node_sizes(:)';
+
+bnet.cnodes = mysetdiff(1:n, bnet.dnodes);
+% too many functions refer to cnodes to rename it to cts_nodes -
+% We hope it won't be confused with chance nodes!
+
+bnet.parents = cell(1,n);
+for i=1:n
+ bnet.parents{i} = parents(dag, i);
+end
+
+E = max(bnet.equiv_class);
+mem = cell(1,E); % mem{e} = nodes whose params are tied to class e
+for i=1:n
+ e = bnet.equiv_class(i);
+ mem{e} = [mem{e} i];
+end
+bnet.members_of_equiv_class = mem;
+
+bnet.CPD = cell(1, E);
+
+bnet.rep_of_eclass = zeros(1,E); % representative (first) member of each class
+for e=1:E
+ mems = bnet.members_of_equiv_class{e};
+ bnet.rep_of_eclass(e) = mems(1);
+end
+
+directed = 1;
+if ~acyclic(dag,directed)
+ error('graph must be acyclic')
+end
+
+bnet.order = topological_sort(bnet.dag);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,133 @@
+function bnet = mk_dbn(intra, inter, node_sizes, varargin)
+% MK_DBN Make a Dynamic Bayesian Network.
+%
+% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, ...) makes a DBN with arcs
+% from i in slice t to j in slice t iff intra(i,j) = 1, and
+% from i in slice t to j in slice t+1 iff inter(i,j) = 1,
+% for i,j in {1, 2, ..., n}, where n = num. nodes per slice, and t >= 1.
+% node_sizes(i) is the number of values node i can take on.
+% The nodes are assumed to be in topological order. Use TOPOLOGICAL_SORT if necessary.
+% See also mk_bnet.
+%
+% Optional arguments [default in brackets]
+% 'discrete' - list of discrete nodes [1:n]
+% 'observed' - the list of nodes which will definitely be observed in every slice of every case [ [] ]
+% 'eclass1' - equiv class for slice 1 [1:n]
+% 'eclass2' - equiv class for slice 2 [tie nodes with equivalent parents to slice 1]
+% equiv_class1(i) = j means node i in slice 1 gets its parameters from bnet.CPD{j},
+% i.e., nodes i and j have tied parameters.
+% 'intra1' - topology of first slice, if different from others
+% 'names' - a cell array of strings to be associated with nodes 1:n [{}]
+% This creates an associative array, so you write e.g.
+% 'evidence(bnet.names{'bar'}) = 42' instead of 'evidence(2} = 42'
+% assuming names = { 'foo', 'bar', ...}.
+%
+% For backwards compatibility with BNT2, arguments can also be specified as follows
+% bnet = mk_dbn(intra, inter, node_sizes, dnodes, eclass1, eclass2, intra1)
+%
+% After calling this function, you must specify the parameters (conditional probability
+% distributions) using bnet.CPD{i} = gaussian_CPD(...) or tabular_CPD(...) etc.
+
+
+n = length(intra);
+ss = n; % slice size
+bnet.nnodes_per_slice = ss;
+bnet.intra = intra;
+bnet.inter = inter;
+bnet.intra1 = intra; % may be overridden by the 'intra1' option below
+dag = zeros(2*n); % dag over the canonical 2-slice window
+dag(1:n,1:n) = bnet.intra1;
+dag(1:n,(1:n)+n) = bnet.inter;
+dag((1:n)+n,(1:n)+n) = bnet.intra;
+bnet.dag = dag;
+bnet.names = {};
+
+directed = 1;
+if ~acyclic(dag,directed)
+ error('graph must be acyclic')
+end
+
+
+bnet.eclass1 = 1:n;
+%bnet.eclass2 = (1:n)+n;
+bnet.eclass2 = bnet.eclass1;
+for i=1:ss % tie slice-2 node i to slice 1 iff its parent set is isomorphic
+ if isequal(parents(dag, i+ss), parents(dag, i)+ss)
+ %fprintf('%d has isomorphic parents, eclass %d\n', i, bnet.eclass2(i))
+ else
+ bnet.eclass2(i) = max(bnet.eclass2) + 1; % give it a fresh class of its own
+ %fprintf('%d has non isomorphic parents, eclass %d\n', i, bnet.eclass2(i))
+ end
+end
+
+dnodes = 1:n;
+bnet.observed = [];
+
+if nargin >= 4
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1}) % positional (BNT2-style) arguments
+ if nargs >= 1, dnodes = args{1}; end
+ if nargs >= 2, bnet.eclass1 = args{2}; end
+ if nargs >= 3, bnet.eclass2 = args{3}; end
+ if nargs >= 4, bnet.intra1 = args{4}; end
+ else % name/value pairs
+ for i=1:2:nargs
+ switch args{i},
+ case 'discrete', dnodes = args{i+1};
+ case 'observed', bnet.observed = args{i+1};
+ case 'eclass1', bnet.eclass1 = args{i+1};
+ case 'eclass2', bnet.eclass2 = args{i+1};
+ case 'intra1', bnet.intra1 = args{i+1};
+ %case 'ar_hmm', bnet.ar_hmm = args{i+1}; % should check topology
+ case 'names', bnet.names = assocarray(args{i+1}, num2cell(1:n));
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ end
+end
+
+
+bnet.observed = sort(bnet.observed); % for comparing sets
+ns = node_sizes;
+bnet.node_sizes_slice = ns(:)';
+bnet.node_sizes = [ns(:) ns(:)]; % both slices have the same sizes
+
+cnodes = mysetdiff(1:n, dnodes);
+bnet.dnodes_slice = dnodes;
+bnet.cnodes_slice = cnodes;
+bnet.dnodes = [dnodes dnodes+n];
+bnet.cnodes = [cnodes cnodes+n];
+
+bnet.equiv_class = [bnet.eclass1(:) bnet.eclass2(:)];
+bnet.CPD = cell(1,max(bnet.equiv_class(:)));
+eclass = bnet.equiv_class(:);
+E = max(eclass);
+bnet.rep_of_eclass = zeros(1,E); % representative (first) member of each class
+for e=1:E
+ mems = find(eclass==e);
+ bnet.rep_of_eclass(e) = mems(1);
+end
+
+ss = n;
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+bnet.hidden_bitv = zeros(1,2*ss);
+bnet.hidden_bitv(hnodes) = 1;
+bnet.hidden_bitv(hnodes+ss) = 1;
+
+bnet.parents = cell(1, 2*ss);
+for i=1:ss
+ bnet.parents{i} = parents(bnet.dag, i);
+ bnet.parents{i+ss} = parents(bnet.dag, i+ss);
+end
+
+bnet.auto_regressive = zeros(1,ss);
+% ar(i)=1 means (observed) node i depends on i in the previous slice
+for o=bnet.observed(:)'
+ if any(bnet.parents{o+ss} <= ss) % has at least one parent in the previous slice
+ bnet.auto_regressive(o) = 1;
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_fgraph.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_fgraph.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+function fg = mk_fgraph(G, node_sizes, factors, varargin)
+% MK_FGRAPH Make a factor graph
+% fg = mk_fgraph(G, node_sizes, factors, ...)
+%
+% A factor graph is a bipartite graph, with one side containing variables,
+% and the other containing functions of (subsets of) these variables.
+% For details, see "Factor Graphs and the Sum-Product Algorithm",
+% F. Kschischang and B. Frey and H-A. Loeliger,
+% IEEE Trans. Info. Theory, 2001
+%
+% G(i,j) = 1 if there is an arc from variable i to factor j
+%
+% node_sizes(i) is the number of values node i can take on,
+% or the length of node i if i is a continuous-valued vector.
+%
+% 'factors' is the list of factors (kernel functions)
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% equiv_class - equiv_class(i)=j means factor node i gets its params from factors{j} [1:F]
+% discrete - the list of nodes which are discrete random variables [1:N]
+%
+% e.g., fg = mk_fgraph(G, [2 2], {bnet.CPD{1},bnet.CPD{2}}, 'discrete', [1 2])
+
+fg.G = G;
+fg.node_sizes = node_sizes;
+fg.factors = factors;
+[fg.nvars fg.nfactors] = size(G);
+
+% default values for parameters
+fg.equiv_class = 1:fg.nfactors;
+fg.dnodes = 1:fg.nvars;
+
+if nargin >= 4
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs % name/value pairs
+ switch args{i},
+ case 'equiv_class', fg.equiv_class = args{i+1};
+ case 'discrete', fg.dnodes = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+% so that determine_pot_type will work...
+fg.utility_nodes = [];
+%fg.decision_nodes = [];
+%fg.chance_nodes = fg.nvars;
+
+fg.dom = cell(1, fg.nfactors); % dom{f} = variables mentioned by factor f
+for f=1:fg.nfactors
+ fg.dom{f} = find(G(:,f));
+end
+fg.dep = cell(1, fg.nvars); % dep{x} = factors that mention variable x
+for x=1:fg.nvars
+ fg.dep{x} = find(G(x,:));
+end
+fg.cnodes = mysetdiff(1:fg.nvars, fg.dnodes);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_fgraph_given_ev.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_fgraph_given_ev.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function fg = mk_fgraph_given_ev(G, node_sizes, factors, ev_CPD, evidence, varargin)
+% MK_FGRAPH_GIVEN_EV Make a factor graph where each node has its own private evidence term
+% fg = mk_fgraph_given_ev(G, node_sizes, factors, ev_CPD, evidence, ...)
+%
+% G, node_sizes and factors are as in mk_fgraph, but they refer to the hidden nodes.
+% ev_CPD{i} is a CPD for the i'th hidden node; this will be converted into a factor
+% for node i using evidence{i}.
+% We currently assume all hidden nodes are discrete, for simplicity.
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% equiv_class - equiv_class(i)=j means factor node i gets its params from factors{j} [1:F]
+% ev_equiv_class - ev_equiv_class(i)=j means evidence node i gets its params from ev_CPD{j} [1:N]
+
+
+N = length(node_sizes);
+nfactors = length(factors);
+
+% default values for parameters
+eclass = 1:nfactors;
+ev_eclass = 1:N;
+
+if nargin >= 6
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'equiv_class', eclass = args{i+1};
+ case 'ev_equiv_class', ev_eclass = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+pot_type = 'd';
+for x=1:N
+ ev = cell(1,2); % cell 1 is the hidden parent, cell 2 is the observed child
+ ev(2) = evidence(x);
+ dom = 1:2;
+ F = convert_to_pot(ev_CPD{ev_eclass(x)}, pot_type, dom(:), ev);
+ M = pot_to_marginal(F);
+ %factors{end+1} = tabular_CPD('self', 1, 'ps', [], 'sz', node_sizes(x), 'CPT', M.T);
+ factors{end+1} = mk_isolated_tabular_CPD(node_sizes(x), {'CPT', M.T});
+end
+
+E = max(eclass);
+fg = mk_fgraph([G eye(N)], node_sizes, factors, 'equiv_class', [eclass E+1:E+N]);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_higher_order_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_higher_order_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+function bnet = mk_higher_order_dbn(intra, inter, node_sizes, varargin)
+% MK_HIGHER_ORDER_DBN Make a higher-order Dynamic Bayesian Network.
+%
+% BNET = MK_HIGHER_ORDER_DBN(INTRA, INTER, NODE_SIZES, ...) makes a DBN with arcs
+% from i in slice t to j in slice t iff intra(i,j) = 1, and
+% from i in slice t to j in slice t+1 iff inter(i,j) = 1,
+% for i,j in {1, 2, ..., n}, where n = num. nodes per slice, and t >= 1.
+% node_sizes(i) is the number of values node i can take on.
+% The nodes are assumed to be in topological order. Use TOPOLOGICAL_SORT if necessary.
+% See also mk_bnet.
+%
+% Optional arguments [default in brackets]
+% 'discrete' - list of discrete nodes [1:n]
+% 'observed' - the list of nodes which will definitely be observed in every slice of every case [ [] ]
+% 'eclass1' - equiv class for slice 1 [1:n]
+% 'eclass2' - equiv class for slice 2 [tie nodes with equivalent parents to slice 1]
+% equiv_class1(i) = j means node i in slice 1 gets its parameters from bnet.CPD{j},
+% i.e., nodes i and j have tied parameters.
+% 'intra1' - topology of first slice, if different from others
+% 'names' - a cell array of strings to be associated with nodes 1:n [{}]
+% This creates an associative array, so you write e.g.
+% 'evidence{bnet.names('bar')} = 42' instead of 'evidence{2} = 42'
+% assuming names = { 'foo', 'bar', ...}.
+%
+% For backwards compatibility with BNT2, arguments can also be specified as follows
+% bnet = mk_dbn(intra, inter, node_sizes, dnodes, eclass1, eclass2, intra1)
+%
+% After calling this function, you must specify the parameters (conditional probability
+% distributions) using bnet.CPD{i} = gaussian_CPD(...) or tabular_CPD(...) etc.
+
+
+n = length(intra);
+ss = n;
+bnet.nnodes_per_slice = ss;
+bnet.intra = intra;
+bnet.inter = inter;
+bnet.intra1 = intra;
+
+% As this method is used to generate a higher order Markov Model
+% also connect from time slice t - i -> t with i > 1 has to be
+% taken into account.
+
+%inter should be a three dimensional array where inter(:,:,i)
+%describes the connections from time-slice t - i to t.
+[rows,columns,order] = size(inter);
+assert(rows == n);
+assert(columns == n);
+dag = zeros((order + 1)*n);
+
+i = 0;
+while i <= order
+ j = i;
+ while j <= order
+ if j == i
+ dag(1 + i*n:(i+1)*n,1+i*n:(i+1)*n) = intra;
+ else
+ dag(1+i*n:(i+1)*n,1+j*n:(j+1)*n) = inter(:,:,j - i);
+ end
+ j = j + 1;
+ end;
+ i = i + 1;
+end;
+
+bnet.dag = dag;
+bnet.names = {};
+
+directed = 1;
+if ~acyclic(dag,directed)
+ error('graph must be acyclic')
+end
+
+% Calculation of the equivalence classes
+bnet.eclass1 = 1:n;
+bnet.eclass = zeros(order + 1,ss);
+bnet.eclass(1,:) = 1:n;
+for i = 1:order
+ bnet.eclass(i+1,:) = bnet.eclass(i,:);
+ for j = 1:ss
+ if(isequal(parents(dag,(i-1)*n+j)+ss,parents(dag,(i*n + j))))
+ %fprintf('%d has isomorphic parents, eclass %d \n',j,bnet.eclass(i,j))
+ else
+ bnet.eclass(i + 1,j) = max(bnet.eclass(i+1,:))+1;
+ %fprintf('%d has non isomorphic parents, eclass %d \n',j,bnet.eclass(i,j))
+ end;
+ end;
+end;
+bnet.eclass1 = 1:n;
+
+% To be compatible with the rest of the code
+bnet.eclass2 = bnet.eclass(2,:);
+
+dnodes = 1:n;
+bnet.observed = [];
+
+if nargin >= 4
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1})
+ if nargs >= 1 dnodes = args{1}; end
+ if nargs >= 2 bnet.eclass1 = args{2}; bnet.eclass(1,:) = args{2}; end
+ if nargs >= 3 bnet.eclass2 = args{3}; bnet.eclass(2,:) = args{3}; end
+ if nargs >= 4 bnet.intra1 = args{4}; end
+ else
+ for i=1:2:nargs
+ switch args{i},
+ case 'discrete', dnodes = args{i+1};
+ case 'observed', bnet.observed = args{i+1};
+ case 'eclass1', bnet.eclass1 = args{i+1}; bnet.eclass(1,:) = args{i+1};
+ case 'eclass2', bnet.eclass2 = args{i+1}; bnet.eclass(2,:) = args{i+1};
+ case 'eclass', bnet.eclass = args{i+1};
+ case 'intra1', bnet.intra1 = args{i+1};
+ %case 'ar_hmm', bnet.ar_hmm = args{i+1}; % should check topology
+ case 'names', bnet.names = assocarray(args{i+1}, num2cell(1:n));
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ end
+end
+
+bnet.observed = sort(bnet.observed); % for comparing sets
+ns = node_sizes;
+bnet.node_sizes_slice = ns(:)';
+bnet.node_sizes = repmat(ns(:),1,order + 1);
+
+cnodes = mysetdiff(1:n, dnodes);
+bnet.dnodes_slice = dnodes;
+bnet.cnodes_slice = cnodes;
+bnet.dnodes = dnodes;
+bnet.cnodes = cnodes;
+% To adapt the function to higher order Markov models include dnodes for more
+% time slices
+for i = 1:order
+ bnet.dnodes = [bnet.dnodes dnodes+i*n];
+ bnet.cnodes = [bnet.cnodes cnodes+i*n];
+end
+
+% Build a matrix whose i-th column contains the equivalence classes
+% of the i-th time slice.
+bnet.equiv_class = [bnet.eclass(1,:)]';
+for i = 2:(order + 1)
+ bnet.equiv_class = [bnet.equiv_class bnet.eclass(i,:)'];
+end
+
+bnet.CPD = cell(1,max(bnet.equiv_class(:)));
+
+ss = n;
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+bnet.hidden_bitv = zeros(1,(order + 1)*ss);
+for i = 0:order
+ bnet.hidden_bitv(hnodes +i*ss) = 1;
+end;
+
+bnet.parents = cell(1, (order + 1)*ss);
+for i=1:(order + 1)*ss
+ bnet.parents{i} = parents(bnet.dag, i);
+end
+
+bnet.auto_regressive = zeros(1,ss);
+% ar(i)=1 means (observed) node i depends on i in the previous slice
+for o=bnet.observed(:)'
+ if any(bnet.parents{o+ss} <= ss)
+ bnet.auto_regressive(o) = 1;
+ end
+end
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_limid.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_limid.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,93 @@
+function bnet = mk_limid(dag, node_sizes, varargin)
+% MK_LIMID Make a limited information influence diagram
+%
+% BNET = MK_LIMID(DAG, NODE_SIZES, ...)
+% DAG is the adjacency matrix for a directed acyclic graph.
+% The nodes are assumed to be in topological order. Use TOPOLOGICAL_SORT if necessary.
+% For decision nodes, the parents must explicitely include all nodes
+% on which it can depends, in contrast to the implicit no-forgetting assumption of influence diagrams.
+% (For details, see "Representing and solving decision problems with limited information",
+% Lauritzen and Nilsson, Management Science, 2001.)
+%
+% node_sizes(i) is the number of values node i can take on,
+% or the length of node i if i is a continuous-valued vector.
+% node_sizes(i) = 1 if i is a utility node.
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% chance - the list of nodes which are random variables [1:N]
+% decision - the list of nodes which are decision nodes [ [] ]
+% utility - the list of nodes which are utility nodes [ [] ]
+% equiv_class - equiv_class(i)=j means node i gets its params from CPD{j} [1:N]
+%
+% e.g., limid = mk_limid(dag, ns, 'chance', [1 3], 'utility', [2])
+
+n = length(dag);
+
+% default values for parameters
+bnet.chance_nodes = 1:n;
+bnet.equiv_class = 1:n;
+bnet.utility_nodes = [];
+bnet.decision_nodes = [];
+bnet.dnodes = 1:n; % discrete
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1})
+ if nargs >= 1, bnet.dnodes = args{1}; end
+ if nargs >= 2, bnet.equiv_class = args{2}; end
+ else
+ for i=1:2:nargs
+ switch args{i},
+ case 'equiv_class', bnet.equiv_class = args{i+1};
+ case 'chance', bnet.chance_nodes = args{i+1};
+ case 'utility', bnet.utility_nodes = args{i+1};
+ case 'decision', bnet.decision_nodes = args{i+1};
+ case 'discrete', bnet.dnodes = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ end
+end
+
+bnet.limid = 1;
+
+bnet.dag = dag;
+bnet.node_sizes = node_sizes(:)';
+
+bnet.cnodes = mysetdiff(1:n, bnet.dnodes);
+% too many functions refer to cnodes to rename it to cts_nodes -
+% We hope it won't be confused with chance nodes!
+
+bnet.parents = cell(1,n);
+for i=1:n
+ bnet.parents{i} = parents(dag, i);
+end
+
+E = max(bnet.equiv_class);
+mem = cell(1,E);
+for i=1:n
+ e = bnet.equiv_class(i);
+ mem{e} = [mem{e} i];
+end
+bnet.members_of_equiv_class = mem;
+
+bnet.CPD = cell(1, E);
+
+% for e=1:E
+% i = bnet.members_of_equiv_class{e}(1); % pick arbitrary member
+% switch type{e}
+% case 'tabular', bnet.CPD{e} = tabular_CPD(bnet, i);
+% case 'gaussian', bnet.CPD{e} = gaussian_CPD(bnet, i);
+% otherwise, error(['unrecognized CPD type ' type{e}]);
+% end
+% end
+
+directed = 1;
+if ~acyclic(dag,directed)
+ error('graph must be acyclic')
+end
+
+bnet.order = topological_sort(bnet.dag);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_mnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_mnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function mnet = mk_mnet(graph, node_sizes, cliques, potentials)
+% MK_MNET Make a Markov network (Markov Random Field)
+%
+% mnet = mk_mnet(adj_mat, node_sizes, cliques, potentials)
+%
+% cliques{i} is a list of the nodes in clq i
+% potentials{i} is a dpot object corresponding to the potential for clique i
+%
+
+mnet.markov_net = 1;
+mnet.graph = graph;
+mnet.node_sizes = node_sizes;
+mnet.user_cliques = cliques;
+mnet.user_potentials = potentials;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_mrf2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_mrf2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function mrf2 = mk_mrf2(adj_mat, pot)
+% MK_MRF2 Make a Markov random field with pairwise potentials
+% function mrf2 = mk_mrf2(adj_mat, pot)
+%
+% pot{i,j}(k1,k2)
+
+mrf2.adj_mat = adj_mat;
+mrf2.pot = pot;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_mutilated_samples.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_mutilated_samples.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function [data, clamped] = mk_mutilated_samples(bnet, ncases, max_clamp, usecell)
+% MK_MUTILATED_SAMPLES Do random interventions and then draw random samples
+% [data, clamped] = mk_mutilated_samples(bnet, ncases, max_clamp, usecell)
+%
+% At each step, we pick a random subset of size 0 .. max_clamp, and
+% clamp these nodes to random values.
+%
+% data(i,m) is the value of node i in case m.
+% clamped(i,m) = 1 if node i in case m was set by intervention.
+
+if nargin < 4, usecell = 1; end
+
+ns = bnet.node_sizes;
+n = length(bnet.dag);
+if usecell
+ data = cell(n, ncases);
+else
+ data = zeros(n, ncases);
+end
+clamped = zeros(n, ncases);
+
+csubsets = subsets(1:n, max_clamp, 0); % includes the empty set
+distrib_cset = normalise(ones(1, length(csubsets)));
+
+for m=1:ncases
+ cset = csubsets{sample_discrete(distrib_cset)};
+ nvals = prod(ns(cset));
+ distrib_cvals = normalise(ones(1, nvals));
+ cvals = ind2subv(ns(cset), sample_discrete(distrib_cvals));
+ mutilated_bnet = do_intervention(bnet, cset, cvals);
+ ev = sample_bnet(mutilated_bnet);
+ if usecell
+ data(:,m) = ev;
+ else
+ data(:,m) = cell2num(ev);
+ end
+ clamped(cset,m) = 1;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_named_CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_named_CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function CPT2 = mk_named_CPT(family_names, names, dag, CPT1)
+% MK_NAMED_CPT Permute the dimensions of a CPT so they agree with the internal numbering convention
+% CPT2 = mk_named_CPT(family_names, names, dag, CPT1)
+%
+% This is best explained by example.
+% Consider the following directed acyclic graph
+%
+% C
+% / \
+% R S
+% \ /
+% W
+%
+% where all arcs point down.
+% When we create the CPT for node W, we consider S as its first parent, and R as its
+% second, and hence write
+%
+% S R W
+% CPT1(1,1,:) = [1.0 0.0];
+% CPT1(2,1,:) = [0.2 0.8]; % P(W=1 | R=1, S=2) = 0.2
+% CPT1(1,2,:) = [0.1 0.9];
+% CPT1(2,2,:) = [0.01 0.99];
+%
+% However, when we create the dag using mk_adj_mat, the nodes get topologically sorted,
+% and by chance, node R precedes node S in this ordering.
+% Hence we should have written
+%
+% R S W
+% CPT2(1,1,:) = [1.0 0.0];
+% CPT2(2,1,:) = [0.1 0.9];
+% CPT2(1,2,:) = [0.2 0.8]; % P(W=1 | R=1, S=2) = 0.2
+% CPT2(2,2,:) = [0.01 0.99];
+%
+% Since we do not know the order of the nodes in advance, we can write
+% CPT2 = mk_named_CPT({'S', 'R', 'W'}, names, dag, CPT1)
+% where 'S', 'R', 'W' are the order of the dimensions we assumed (the child node must be last in this list),
+% and names{i} is the name of the i'th node.
+
+n = length(family_names);
+family_nums = zeros(1,n);
+for i=1:n
+ family_nums(i) = stringmatch(family_names{i}, names); % was strmatch
+end
+
+fam = family(dag, family_nums(end));
+perm = zeros(1,n);
+for i=1:n
+ % perm(i) = find(family_nums(i) == fam);
+ perm(i) = find(fam(i) == family_nums);
+end
+
+CPT2 = permute(CPT1, perm);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/mk_slice_and_half_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/mk_slice_and_half_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function bnet15 = mk_slice_and_half_dbn(bnet, int)
+% function bnet = mk_slice_and_half_dbn(bnet, int)
+% function bnet = mk_slice_and_half_dbn(bnet, int)
+%
+% Create a "1.5 slice" jtree, containing the interface nodes of slice 1
+% and all the nodes of slice 2
+% To keep the node numbering the same, we simply disconnect the non-interface nodes
+% from slice 1, and set their size to 1.
+% We do this to speed things up, and so that the likelihood is computed correctly.
+% We do not need to do
+% this if we just want to compute marginals (i.e., we can include nodes whose potentials will
+% be left as all 1s).
+
+intra15 = bnet.intra;
+ss = length(bnet.intra);
+nonint = mysetdiff(1:ss, int);
+for i=nonint(:)'
+ intra15(:,i) = 0;
+ intra15(i,:) = 0;
+ %assert(~any(bnet.inter(i,:)))
+end
+dag15 = [intra15 bnet.inter;
+ zeros(ss) bnet.intra];
+ns = bnet.node_sizes(:);
+ns(nonint) = 1; % disconnected nodes get size 1
+obs_nodes = [bnet.observed(:) bnet.observed(:)+ss];
+bnet15 = mk_bnet(dag15, ns, 'discrete', bnet.dnodes, 'equiv_class', bnet.equiv_class(:), ...
+ 'observed', obs_nodes(:));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/noisyORtoTable.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/noisyORtoTable.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function CPT = noisyORtoTable(inhibit, leak_inhibit)
+% NOISYORTOTABLE Convert noisyOR distribution to CPT
+% function CPT = noisyORtoTable(inhibit, leak_inhibit)
+%
+% inhibit(i) = prob i'th parent will be inhibited (flipped from 1 to 0)
+% leak_inhibit - optional suppression of leak
+% CPT(U1,...,Un, X) = Pr(X|U1,...,Un) where the Us are the parents (excluding leak).
+% State 1 = off, 2 = on
+
+if nargin < 2, leak_inhibit = 1; end
+
+q = [leak_inhibit inhibit(:)'];
+
+if length(q)==1
+ CPT = [q 1-q];
+ return;
+end
+
+n = length(q);
+Bn = ind2subv(2*ones(1,n), 1:(2^n))-1; % all n bit vectors, with the left most column toggling fastest (LSB)
+CPT = zeros(2^n, 2);
+% Pr(X=0 | U_1 .. U_n) = prod_{i: U_i = on} q_i = prod_i q_i ^ U_i = exp(u' * log(q_i))
+% This method is problematic when q contains zeros
+
+Q = repmat(q(:)', 2^n, 1);
+Q(logical(~Bn)) = 1;
+CPT(:,1) = prod(Q,2);
+CPT(:,2) = 1-CPT(:,1);
+
+CPT = reshape(CPT(2:2:end), 2*ones(1,n)); % skip cases in which the leak is off
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/partition_dbn_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/partition_dbn_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function [pnodes, tnodes] = partition_dbn_nodes(intra, inter)
+% PARTITION_DBN_NODES Divide the nodes into a DBN into persistent and transient.
+% [pnodes, tnodes] = partition_dbn_nodes(intra, inter)
+% Persistent nodes have children in the next time slice, transient nodes do not.
+
+ss = length(intra);
+pnodes = [];
+tnodes = [];
+for i=1:ss
+ cs = children(inter, i);
+ if isempty(cs)
+ tnodes = [tnodes i];
+ else
+ pnodes = [pnodes i];
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/partition_matrix_vec_3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/partition_matrix_vec_3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [A1, A2, B1, B2, C11, C12, C21, C22] = partition_matrix_vec_3(A, B, C, n1, n2, bs)
+
+dom = myunion(n1, n2);
+n1i = block(find_equiv_posns(n1, dom), bs(dom));
+n2i = block(find_equiv_posns(n2, dom), bs(dom));
+
+
+ A1 = A(n1i);
+ A2 = A(n2i);
+ if isempty(B)
+ B1 = zeros(size(n1i, 2),size(B, 2));
+ B2 = zeros(size(n2i, 2),size(B, 2));
+ else
+ B1 = B(n1i, :);
+ B2 = B(n2i, :);
+ end
+
+
+ C11 = C(n1i, n1i);
+ C12 = C(n1i, n2i);
+ C21 = C(n2i, n1i);
+ C22 = C(n2i, n2i);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/sample_bnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/sample_bnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function sample = sample_bnet(bnet, varargin)
+% SAMPLE_BNET Generate a random sample from a Bayes net.
+% SAMPLE = SAMPLE_BNET(BNET, ...)
+%
+% sample{i} contains the value of the i'th node.
+% i.e., the result is an Nx1 cell array.
+% Nodes are sampled in the order given by bnet.order.
+%
+% Optional arguments:
+%
+% evidence - initial evidence; if evidence{i} is non-empty, node i won't be sampled.
+
+% set default params
+n = length(bnet.dag);
+sample = cell(n,1);
+
+% get optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'evidence', sample = args{i+1}(:);
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+for j=bnet.order(:)'
+ if isempty(sample{j})
+ %ps = parents(bnet.dag, j);
+ ps = bnet.parents{j};
+ e = bnet.equiv_class(j);
+ sample{j} = sample_node(bnet.CPD{e}, sample(ps));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/sample_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/sample_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,73 @@
+function seq = sample_dbn(bnet, varargin)
+% SAMPLE_DBN Generate a random sequence from a DBN.
+% seq = sample_dbn(bnet, ...)
+%
+% seq{i,t} contains the values of the i'th node in the t'th slice.
+%
+% Optional arguments:
+%
+% length - length of sequence to be generated (can also just use sample_dbn(bnet,T))
+% stop_test - name of a function which is used to decide when to stop;
+% This will be called as feval(stop_test, seq(:,t))
+% i.e., stop_test is passed a cell array containing all the nodes in the current slice.
+% evidence - initial evidence; if evidence{i,t} is non-empty, this node won't be sampled.
+
+args = varargin;
+nargs = length(args);
+
+if (nargs == 1) & ~isstr(args{1})
+ % Old syntax: sample_dbn(bnet, T)
+ T = args{1};
+else
+ % get length
+ T = 1;
+ for i=1:2:nargs
+ switch args{i},
+ case 'length', T = args{i+1};
+ case 'evidence', T = size(args{i+1}, 2);
+ end
+ end
+end
+
+ss = length(bnet.intra);
+% set default arguments
+seq = cell(ss, T);
+stop_test = [];
+for i=1:2:nargs
+ switch args{i},
+ case 'evidence', seq = args{i+1}; % initialise observed nodes
+ case 'stop_test', stop_test = args{i+1};
+ end
+end
+
+t = 1;
+for i=1:ss
+ if ~isempty(stop_test) | isempty(seq{i,t})
+ ps = parents(bnet.dag, i);
+ e = bnet.equiv_class(i,1);
+ pvals = seq(ps);
+ seq{i,t} = sample_node(bnet.CPD{e}, pvals);
+ %fprintf('sample i=%d,t=%d,val=%d,ps\n', i, t, seq(i,t)); pvals(:)'
+ end
+end
+t = 2;
+done = 0;
+while ~done
+ for i=1:ss
+ if ~isempty(stop_test) | isempty(seq{i,t})
+ ps = parents(bnet.dag, i+ss) + (t-2)*ss;
+ e = bnet.equiv_class(i,2);
+ pvals = seq(ps);
+ seq{i,t} = sample_node(bnet.CPD{e}, pvals);
+ %fprintf('sample i=%d,t=%d,val=%d,ps\n', i, t, seq(i,t)); pvals(:)'
+ end
+ end
+ if ~isempty(stop_test)
+ done = feval(stop_test, seq(:,t));
+ else
+ if t==T
+ done = 1;
+ end
+ end
+ t = t + 1;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/score_bnet_complete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/score_bnet_complete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function L = log_lik_complete(bnet, cases, clamped)
+% LOG_LIK_COMPLETE Compute sum_m sum_i log P(x(i,m)| x(pi_i,m), theta_i) for a completely observed data set
+% L = log_lik_complete(bnet, cases, clamped)
+%
+% If there is missing data, you must use an inference engine.
+% cases(i,m) is the value assigned to node i in case m.
+% (If there are vector-valued nodes, cases should be a cell array.)
+% clamped(i,m) = 1 if node i was set by intervention in case m (default: clamped = zeros)
+% Clamped nodes contribute a factor of 1.0 to the likelihood.
+
+if iscell(cases), usecell = 1; else usecell = 0; end
+
+n = length(bnet.dag);
+ncases = size(cases, 2);
+if n ~= size(cases, 1)
+ error('data should be of size nnodes * ncases');
+end
+
+if nargin < 3, clamped = zeros(n,ncases); end
+
+L = 0;
+for i=1:n
+ ps = parents(bnet.dag, i);
+ e = bnet.equiv_class(i);
+ u = find(clamped(i,:)==0);
+ L = L + log_prob_node(bnet.CPD{e}, cases(i,u), cases(ps,u));
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/shrink_obs_dims_in_gaussian.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/shrink_obs_dims_in_gaussian.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function marg2 = shrink_obs_dims_in_gaussian(marg1, dom, evidence, ns)
+% SHRINK_OBS_DIMS_IN_GAUSSIAN Remove observed dimensions from mu/Sigma
+% function marg2 = shrink_obs_dims_in_gaussian(marg1, dom, evidence, ns)
+
+% This is used by loopy
+
+hdom = dom(isemptycell(evidence(dom)));
+ndx = find_equiv_posns(hdom, dom);
+b = block(ndx, ns(dom));
+marg2.mu = marg1.mu(b);
+marg2.Sigma = marg1.Sigma(b,b);
+marg2.domain = marg1.domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/shrink_obs_dims_in_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/shrink_obs_dims_in_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function T2 = shrink_obs_dims_in_table(T1, dom, evidence)
+% SHRINK_OBS_DIMS_IN_TABLE Set observed dimensions to size 1
+% T2 = shrink_obs_dims_in_table(T1, dom, evidence)
+%
+% If 'T1' contains observed nodes, it will have 0s in the positions that are
+% inconsistent with the evidence. We now remove these 0s and set the corresponding dimensions to
+% size 1, to be consistent with the way most inference engines handle evidence, which is to
+% shrink observed nodes before doing inference.
+
+% This is used by pearl and enumerative inf. engines.
+
+odom = dom(~isemptycell(evidence(dom)));
+vals = cat(1,evidence{odom});
+ndx = mk_multi_index(length(dom), find_equiv_posns(odom, dom), vals(:));
+T2 = T1(ndx{:});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/solve_limid.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/solve_limid.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+function [strategy, MEU, niter] = solve_limid(engine, varargin)
+% SOLVE_LIMID Find the (locally) optimal strategy for a LIMID
+% [strategy, MEU, niter] = solve_limid(inf_engine, ...)
+%
+% strategy{d} = stochastic policy for node d (a decision node)
+% MEU = maximum expected utility
+% niter = num iterations used
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default in brackets]
+%
+% max_iter - max. num. iterations [ 1 ]
+% tol - tolerance required of consecutive MEU values, used to assess convergence [1e-3]
+% order - order in which decision nodes are optimized [ reverse numerical order ]
+%
+% e.g., solve_limid(engine, 'tol', 1e-2, 'max_iter', 10)
+
+bnet = bnet_from_engine(engine);
+
+% default values
+max_iter = 1;
+tol = 1e-3;
+D = bnet.decision_nodes;
+order = D(end:-1:1);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'max_iter', max_iter = args{i+1};
+ case 'tol', tol = args{i+1};
+ case 'order', order = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+CPDs = bnet.CPD;
+ns = bnet.node_sizes;
+N = length(ns);
+evidence = cell(1,N);
+strategy = cell(1, N);
+
+iter = 1;
+converged = 0;
+oldMEU = 0;
+while ~converged & (iter <= max_iter)
+ for d=order(:)'
+ engine = enter_evidence(engine, evidence, 'exclude', d);
+ [m, pot] = marginal_family(engine, d);
+ %pot = marginal_family_pot(engine, d);
+ [policy, score] = upot_to_opt_policy(pot);
+ e = bnet.equiv_class(d);
+ CPDs{e} = set_fields(CPDs{e}, 'policy', policy);
+ engine = update_engine(engine, CPDs);
+ strategy{d} = policy;
+ end
+ engine = enter_evidence(engine, evidence);
+ [m, pot] = marginal_nodes(engine, []);
+ %pot = marginal_family_pot(engine, []);
+ [dummy, MEU] = upot_to_opt_policy(pot);
+ if approxeq(MEU, oldMEU, tol)
+ converged = 1;
+ end
+ oldMEU = MEU;
+ iter = iter + 1;
+end
+niter = iter - 1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/unroll_dbn_topology.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/unroll_dbn_topology.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function M = unroll_dbn_topology(intra, inter, T, intra1)
+% UNROLL_DBN_TOPOLOGY Make the block diagonal adjacency matrix for a DBN consisting of T slices
+% M = unroll_dbn_topology(intra, inter, T, intra1)
+%
+% intra is the connectivity within a slice, inter between two slices.
+% M will have intra along the diagonal, and inter one above the diagonal.
+% intra1 is an optional argument, in case the intra is different for the first slice.
+
+if nargin < 4, intra1 = intra; end
+
+ss = length(intra); % slice size
+M = sparse(ss*T, ss*T);
+
+b = 1:ss;
+M(b,b) = intra1;
+M(b,b+ss) = inter;
+
+for t=2:T-1
+ b = (1:ss) + (t-1)*ss;
+ M(b,b) = intra;
+ M(b,b+ss) = inter;
+end
+
+t = T;
+b = (1:ss) + (t-1)*ss;
+M(b,b) = intra;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/unroll_higher_order_topology.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/unroll_higher_order_topology.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function M = unroll_higher_order_topology(intra, inter, T, intra1)
+% UNROLL_DBN_TOPOLOGY Make the block diagonal adjacency matrix for a DBN consisting of T slices
+% M = unroll_dbn_topology(intra, inter, T, intra1)
+%
+% intra is the connectivity within a slice, inter between two slices.
+% M will have intra along the diagonal, and inter one above the diagonal.
+% intra1 is an optional argument, in case the intra is different for the first slice.
+
+if nargin < 4
+ intra1 = intra;
+end;
+
+
+ss = length(intra); % slice size
+M = sparse(ss*T, ss*T);
+[rows,columns,order] = size(inter);
+for t1 = 1:T
+ b = 1 + (t1 - 1)*ss : t1*ss;
+ if t1 == 1
+ M(b,b) = intra1;
+ else
+ M(b,b) = intra;
+ end
+ for t2 = 1:order
+ if t1 + t2 <= T
+ M(b,b+t2*ss) = inter(:,:,t2);
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/general/unroll_set.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/general/unroll_set.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function U = unroll_set(S, ss, T)
+% UNROLL_SET Make T shifted copies of the set of nodes S in a slice of size ss.
+% U = unroll_set(S, ss, T)
+
+offset = repmat(0:ss:(T-1)*ss, [length(S) 1]);
+U = repmat(S(:), [1 T]) + offset;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/bnet_from_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/get_field.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/@inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/marginal_family_pot.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/observed_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/@inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/marginal_family_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/marginal_family_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function pot = marginal_family_pot(engine, i)
+% MARGINAL_FAMILY_POT Compute the marginal on i's family and return as a potentila (inf_engine)
+% function pot = marginal_family_pot(engine,i)
+
+% This function is only called by solve_limid.
+% It requires that engine's marginal_family function return a potential.
+% This is true for jtree_inf_engine, but not for, say, jtree_ndx_inf_engine.
+% All limids must be solved using potentials,
+% but this is not true for bnets.
+
+%[m, pot] = marginal_family(engine, i);
+
+bnet = bnet_from_engine(engine);
+[m, pot] = marginal_nodes(engine, family(bnet.dag, i));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/observed_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/Old/observed_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function onodes = observed_nodes(engine)
+% OBSERVED_NODES Return nodes that are guaranteed to be observed, indep of evidence (generic inf_engine)
+% onodes = observed_nodes(engine)
+
+onodes = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/bnet_from_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/bnet_from_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function bnet = bnet_from_engine(engine)
+% BNET_FROM_ENGINE Return the bnet structure stored inside the engine (inf_engine)
+% bnet = bnet_from_engine(engine)
+
+bnet = engine.bnet;
+
+% We cannot write 'engine.bnet' without writing a 'subsref' function,
+% since engine is an object with private parts.
+% The bnet field should be the only thing external users of the engine should need access to.
+% We do not pass bnet as a separate argument, since it could get out of synch with the one
+% encoded inside the engine.
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/get_field.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/get_field.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function val = get_field(engine, name)
+% GET_FIELD Get the value of a named field from a generic engine
+% val = get_field(engine, name)
+%
+% The following fields can be accessed
+%
+% bnet
+%
+% e.g., bnet = get_field(engine, 'bnet')
+
+switch name
+ case 'bnet', val = engine.bnet;
+ otherwise,
+ error(['invalid argument name ' name]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function engine = inf_engine(bnet)
+
+engine.bnet = bnet;
+engine = class(engine, 'inf_engine');
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function m = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on i's family (inf_engine)
+% m = marginal_family(engine, i, t)
+%
+% t defaults to 1.
+
+if nargin < 3, t = 1; end
+
+bnet = bnet_from_engine(engine);
+if t==1
+ m = marginal_nodes(engine, family(bnet.dag, i));
+else
+ ss = length(bnet.intra);
+ fam = family(bnet.dag, i+ss);
+ if any(fam<=ss)
+ % i has a parent in the preceeding slice
+ % Hence the lowest numbered slice containing the family is t-1
+ m = marginal_nodes(engine, fam, t-1);
+ else
+ % The family all fits inside slice t
+ % Hence shift the indexes back to slice 1
+ m = marginal_nodes(engine, fam-ss, t);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function engine = set_fields(engine, varargin)
+% SET_FIELDS Set the fields for a generic engine
+% engine = set_fields(engine, name/value pairs)
+%
+% e.g., engine = set_fields(engine, 'maximize', 1)
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'maximize', engine.maximize = args{i+1};
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/@inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (inf_engine).
+% engine = update_engine(engine, newCPDs)
+%
+% This generic method is suitable for engines that do not process the parameters until 'enter_evidence'.
+
+engine.bnet.CPD = newCPDs;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/dummy/1.1.1.1/Sat Jan 18 22:22:22 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+A D/@inf_engine////
+A D/dynamic////
+A D/online////
+A D/static////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+/bk_ff_hmm_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_init_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_marginal_from_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_predict_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_update_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_update_bel1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@bk_ff_hmm_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/bk_ff_hmm_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/bk_ff_hmm_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function engine = bk_ff_hmm_inf_engine(bnet)
+% BK_FF_HMM_INF_ENGINE Naive (HMM-based) implementation of fully factored form of Boyen-Koller
+% engine = bk_ff_hmm_inf_engine(bnet)
+%
+% This is implemented on top of the forwards-backwards algo for HMMs,
+% so it is *less* efficient than exact inference! However, it is good for educational purposes,
+% because it illustrates the BK algorithm very clearly.
+
+[persistent_nodes, transient_nodes] = partition_dbn_nodes(bnet.intra, bnet.inter);
+assert(isequal(sort(bnet.observed), transient_nodes));
+[engine.prior, engine.transmat] = dbn_to_hmm(bnet);
+
+ss = length(bnet.intra);
+
+engine.bel = [];
+engine.bel_marginals = [];
+engine.marginals = [];
+
+
+engine = class(engine, 'bk_ff_hmm_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_init_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_init_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function engine = dbn_init_bel(engine)
+% DBN_INIT_BEL Compute the initial belief state (bk_ff_hmm)
+% engine = dbn_init_bel(engine)
+
+engine.bel = engine.prior(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_marginal_from_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_marginal_from_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function marginal = dbn_marginal_from_bel(engine, i)
+% DBN_MARGINAL_FROM_BEL Compute the marginal on a node given the current belief state (bk_ff_hmm)
+% marginal = dbn_marginal_from_bel(engine, i)
+
+marginal = pot_to_marginal(engine.bel_marginals{i});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_predict_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_predict_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function engine = dbn_predict_bel(engine, lag)
+% DBN_PREDICT_BEL Predict the belief state 'lag' steps into the future (bk_ff_hmm)
+% engine = dbn_predict_bel(engine, lag)
+% 'lag' defaults to 1
+
+if nargin < 2, lag = 1; end
+
+for d=1:lag
+ %newbel = engine.transmat' * engine.bel;
+ newbel = normalise(engine.transmat' * engine.bel);
+
+ hnodes = engine.hnodes;
+ bnet = bnet_from_engine(engine);
+ ns = bnet.node_sizes;
+ [marginals, marginalsT] = project_joint_onto_marginals(newbel, hnodes, ns);
+ newbel = combine_marginals_into_joint(marginalsT, hnodes, ns);
+ engine.bel_marginals = marginalsT;
+ engine.bel = newbel;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function [engine, loglik] = dbn_update_bel(engine, evidence)
+% DBN_UPDATE_BEL Update the belief state (bk_ff_hmm)
+% [engine, loglik] = dbn_update_bel(engine, evidence)
+%
+% evidence{i,1} contains the evidence on node i in slice t-1
+% evidence{i,2} contains the evidence on node i in slice t
+
+oldbel = engine.bel;
+bnet = bnet_from_engine(engine);
+obslik = mk_hmm_obs_lik_vec(bnet, evidence);
+[newbel, lik] = normalise((engine.transmat' * oldbel) .* obslik);
+loglik = log(lik);
+
+hnodes = engine.hnodes;
+ns = bnet.node_sizes;
+[marginals, marginalsT] = project_joint_onto_marginals(newbel, hnodes, ns);
+newbel = combine_marginals_into_joint(marginalsT, hnodes, ns);
+engine.bel_marginals = marginalsT;
+engine.bel = newbel;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function [engine, loglik] = dbn_update_bel1(engine, evidence)
+% DBN_UPDATE_BEL Update the initial belief state (bk_ff_hmm)
+% [engine, loglik] = dbn_update_bel(engine, evidence)
+%
+% evidence{i} contains the evidence on node i in slice 1
+
+oldbel = engine.bel;
+bnet = bnet_from_engine(engine);
+obslik = mk_hmm_obs_lik_vec1(bnet, evidence);
+[newbel, lik] = normalise(oldbel .* obslik);
+loglik = log(lik);
+
+hnodes = engine.hnodes;
+ns = bnet.node_sizes;
+[marginals, marginalsT] = project_joint_onto_marginals(newbel, hnodes, ns);
+newbel = combine_marginals_into_joint(marginalsT, hnodes, ns);
+engine.bel_marginals = marginalsT;
+engine.bel = newbel;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (bk_ff_hmm)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (not yet supported), else sum-product [0]
+% filter - if 1, do filtering, else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+assert(~maximize);
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+T = size(evidence, 2);
+assertBNT(~any(isemptycell(evidence(onodes,:))));
+
+obslik = mk_hmm_obs_lik_mat(bnet, onodes, evidence);
+
+ns = bnet.node_sizes_slice;
+ns(onodes) = 1;
+
+[gamma, loglik, marginals, marginalsT] = bk_ff_fb(engine.prior, engine.transmat, obslik, filter, hnodes, ns);
+
+for t=1:T
+ for i=hnodes(:)'
+ engine.marginals{i,t} = pot_to_marginal(marginalsT{i,t});
+ end
+ for i=onodes(:)'
+ m.domain = i + (t-1)*ss;
+ m.T = 1;
+ engine.marginals{i,t} = m;
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function m = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on the specified family (bk_ff_hmm)
+% marginal = marginal_family(engine, i, t)
+
+error('bk_ff_hmm doesn''t support marginal_family');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_nodes(engine, nodes, t)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (bk_ff_hmm)
+% marginal = marginal_nodes(engine, i, t)
+
+assert(length(nodes)==1);
+i = nodes(end);
+%assert(myismember(i, engine.hnodes));
+marginal = engine.marginals{i,t};
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+marginal.domain = i + (t-1)*ss;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/bk_ff_fb.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/combine_marginals_into_joint.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_to_hmm.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/mk_hmm_obs_lik_mat.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/mk_hmm_obs_lik_vec.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/mk_hmm_obs_lik_vec1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/project_joint_onto_marginals.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/bk_ff_fb.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/bk_ff_fb.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [gamma, loglik, marginals, marginalsT] = bk_ff_fb(prior, transmat, obslik, filter_only, hnodes, ns)
+% BK_FF_FB Fully factored Boyen-Koller version of forwards-backwards
+% [gamma, loglik, marginals, marginalsT] = bk_ff_hmm(prior, transmat, obslik, filter_only, hnodes, ns)
+
+ss = length(ns);
+S = length(prior);
+T = size(obslik, 2);
+marginals = cell(ss,T);
+marginalsT = cell(ss,T);
+scale = zeros(1,T);
+alpha = zeros(S, T);
+
+transmat2 = transmat';
+for t=1:T
+ if t==1
+ [alpha(:,t), scale(t)] = normalise(prior(:) .* obslik(:,t));
+ else
+ [alpha(:,t), scale(t)] = normalise((transmat2 * alpha(:,t-1)) .* obslik(:,t));
+ end
+ [marginals(:,t), marginalsT(:,t)] = project_joint_onto_marginals(alpha(:,t), hnodes, ns);
+ alpha(:,t) = combine_marginals_into_joint(marginalsT(:,t), hnodes, ns);
+ %fprintf('alpha t=%d\n', t);
+ %celldisp(marginals(1:8,t))
+end
+loglik = sum(log(scale));
+
+if filter_only
+ gamma = alpha;
+ return;
+end
+
+beta = zeros(S,T);
+gamma = zeros(S,T);
+t = T;
+beta(:,t) = ones(S,1);
+gamma(:,t) = normalise(alpha(:,t) .* beta(:,t));
+[marginals(:,t), marginalsT(:,t)] = project_joint_onto_marginals(gamma(:,t), hnodes, ns);
+
+for t=T-1:-1:1
+ b = beta(:,t+1) .* obslik(:,t+1);
+ beta(:,t) = normalise((transmat * b));
+ [junk, tempT] = project_joint_onto_marginals(beta(:,t), hnodes, ns);
+ beta(:,t) = combine_marginals_into_joint(tempT, hnodes, ns);
+ %gamma(:,t) = normalise(alpha(:,t) .* beta(:,t));
+ %[marginals(:,t), marginalsT(:,t)] = project_joint_onto_marginals(gamma(:,t), hnodes, ns);
+end
+
+gamma2 = zeros(S,T);
+for t=T-1:-1:1
+ b = beta(:,t+1) .* obslik(:,t+1);
+ xi(:,:,t) = normalise((transmat .* (alpha(:,t) * b')));
+ if t==T-1
+ gamma2(:,T) = sum(xi(:,:,T-1), 1)';
+ end
+ gamma2(:,t) = sum(xi(:,:,t), 2);
+ [marginals(:,t), marginalsT(:,t)] = project_joint_onto_marginals(gamma2(:,t), hnodes, ns);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/combine_marginals_into_joint.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/combine_marginals_into_joint.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function joint = combine_marginals_into_joint(marginalsT, hnodes, ns)
+
+jointT = dpot(hnodes, ns(hnodes));
+for i=hnodes(:)'
+ jointT = multiply_by_pot(jointT, marginalsT{i});
+end
+m = pot_to_marginal(jointT);
+joint = m.T(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/dbn_to_hmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/dbn_to_hmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function [prior, transmat] = dbn_to_hmm(bnet)
+% DBN_TO_HMM Compute the discrete HMM matrices from a simple DBN
+% [prior, transmat] = dbn_to_hmm(bnet)
+
+onodes = bnet.observed;
+ss = length(bnet.intra);
+evidence = cell(1,2*ss);
+hnodes = mysetdiff(1:ss, onodes);
+prior = multiply_CPTs(bnet, [], hnodes, evidence);
+transmat = multiply_CPTs(bnet, hnodes, hnodes+ss, evidence);
+%obsmat1 = multiply_CPTs(bnet, hnodes, onodes, evidence);
+%obsmat = multiply_CPTs(bnet, hnodes+ss, onodes+ss, evidence);
+%obsmat1 = obsmat if the observation matrices are tied across slices
+
+
+
+%%%%%%%%%%%%
+
+function mat = multiply_CPTs(bnet, pdom, cdom, evidence)
+
+% MULTIPLY_CPTS Make a matrix Pr(Y|X), where X represents all the parents, and Y all the children
+% We assume the children have no intra-connections.
+%
+% e.g., Consider the DBN with interconnectivity i->i', j->j',k', k->i',k'
+% Then transition matrix = Pr(i,j,k -> i',j',k') = Pr(i,k->i') Pr(j->j') Pr(j,k->k')
+
+dom = [pdom cdom];
+ns = bnet.node_sizes;
+bigpot = dpot(dom, ns(dom));
+for j=cdom(:)'
+ e = bnet.equiv_class(j);
+ fam = family(bnet.dag, j);
+ pot = convert_to_pot(bnet.CPD{e}, 'd', fam(:), evidence);
+ bigpot = multiply_by_pot(bigpot, pot);
+end
+psize = prod(ns(pdom));
+csize = prod(ns(cdom));
+T = pot_to_marginal(bigpot);
+mat = reshape(T.T, [psize csize]);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_mat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_mat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function obslik = mk_hmm_obs_lik_mat(bnet, onodes, evidence)
+% MK_HMM_OBS_LIK_MAT Make the observation likelihood matrix for all slices
+% obslik = mk_hmm_obs_lik_mat(bnet, onodes, evidence)
+%
+% obslik(i,t) = Pr(Y(t) | X(t)=i)
+
+[ss T] = size(evidence);
+
+hnodes = mysetdiff(1:ss, onodes);
+ns = bnet.node_sizes_slice;
+ns(onodes) = 1;
+Q = prod(ns(hnodes));
+obslik = zeros(Q,T);
+
+dom = 1:ss;
+for t=1:T
+ bigpot = dpot(dom, ns(dom));
+ for i=onodes(:)'
+ if t==1
+ e = bnet.equiv_class(i,1);
+ fam = family(bnet.dag, i);
+ else
+ e = bnet.equiv_class(i,2);
+ fam = family(bnet.dag, i, 2) + ss*(t-2);
+ end
+ pot = convert_to_pot(bnet.CPD{e}, 'd', fam(:), evidence);
+ pot = set_domain_pot(pot, family(bnet.dag, i));
+ bigpot = multiply_by_pot(bigpot, pot);
+ end
+ m = pot_to_marginal(bigpot);
+ obslik(:,t) = m.T(:);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function obslik = mk_hmm_obs_lik_vec(bnet, evidence)
+% MK_HMM_OBS_LIK_VEC Make the observation likelihood vector for one slice
+% obslik = mk_obs_lik(bnet, evidence)
+%
+% obslik(i) = Pr(y(t) | X(t)=i)
+% evidence{i,1} contains the evidence on node i in slice t-1
+% evidence{i,2} contains the evidence on node i in slice t
+
+ns = bnet.node_sizes;
+ss = length(bnet.intra);
+onodes = find(~isemptycell(evidence(:)));
+hnodes = find(isemptycell(evidence(:)));
+ens = ns;
+ens(onodes) = 1;
+Q = prod(ens(hnodes));
+obslik = zeros(1,Q);
+dom = (1:ss)+ss;
+bigpot = dpot(dom, ens(dom));
+onodes1 = find(~isemptycell(evidence(:,1)));
+for i=onodes1(:)'
+ e = bnet.equiv_class(i,2);
+ fam = family(bnet.dag, i, 2);
+ pot = convert_to_pot(bnet.CPD{e}, 'd', fam, evidence);
+ bigpot = multiply_by_pot(bigpot, pot);
+end
+m = pot_to_marginal(bigpot);
+obslik = m.T(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function obslik = mk_hmm_obs_lik_vec1(bnet, evidence)
+% MK_HMM_OBS_LIK_VEC1 Make the observation likelihood vector for the first slice
+% obslik = mk_hmm_obs_lik_vec1(engine, evidence)
+%
+% obslik(i) = Pr(y(1) | X(1)=i)
+% evidence{i} contains the evidence on node i in slice 1
+
+ns = bnet.node_sizes;
+ss = length(ns);
+onodes = find(~isemptycell(evidence(:)));
+hnodes = find(isemptycell(evidence(:)));
+ens = ns;
+ens(onodes) = 1;
+Q = prod(ens(hnodes));
+obslik = zeros(1,Q);
+dom = (1:ss);
+bigpot = dpot(dom, ens(dom));
+for i=onodes(:)'
+ e = bnet.equiv_class(i,1);
+ fam = family(bnet.dag, i);
+ pot = convert_to_pot(bnet.CPD{e}, 'd', fam(:), evidence);
+ bigpot = multiply_by_pot(bigpot, pot);
+end
+m = pot_to_marginal(bigpot);
+obslik = m.T(:);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/project_joint_onto_marginals.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_ff_hmm_inf_engine/private/project_joint_onto_marginals.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function [marginals, marginalsT] = project_joint_onto_marginals(joint, hnodes, ns)
+
+ss = length(ns);
+jointT = dpot(hnodes, ns(hnodes), joint);
+marginalsT = cell(1, ss);
+marginals = cell(1,ss);
+for i=hnodes(:)'
+ marginalsT{i} = marginalize_pot(jointT, i);
+ m = pot_to_marginal(marginalsT{i});
+ marginals{i} = m.T(:);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+/bk_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_init_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_marginal_from_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_update_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dbn_update_bel1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence.m/1.1.1.1/Sat Jan 11 18:13:50 2003//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@bk_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/bk_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/bk_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,107 @@
+function engine = bk_inf_engine(bnet, varargin)
+% BK_INF_ENGINE Boyen-Koller approximate inference algorithm for DBNs.
+%
+% In the BK algorithm, the belief state is represented as a product of marginals,
+% even though the factors may not be independent.
+%
+% engine = bk_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% clusters - if a cell array, clusters{i} specifies the terms in the i'th factor.
+% - 'exact' means create one cluster that contains all the nodes in a slice [exact]
+% - 'ff' means create one cluster per node (ff = fully factorised).
+%
+%
+% For details, see
+% - "Tractable Inference for Complex Stochastic Processes", X. Boyen and D. Koller, UAI 98.
+% - "Approximate learning of dynamic models", X. Boyen and D. Koller, NIPS 98.
+% (The UAI98 paper discusses filtering and theory, and the NIPS98 paper discusses smoothing.)
+
+ss = length(bnet.intra);
+% set default params
+clusters = 'exact';
+
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+ end
+end
+
+if strcmp(clusters, 'exact')
+ %clusters = { compute_interface_nodes(bnet.intra, bnet.inter) };
+ clusters = { 1:ss };
+elseif strcmp(clusters, 'ff')
+ clusters = num2cell(1:ss);
+end
+
+
+% We need to insert the prior on the clusters in slice 1,
+% and extract the posterior on the clusters in slice 2.
+C = length(clusters);
+clusters2 = cell(1,2*C);
+clusters2(1:C) = clusters;
+for c=1:C
+ clusters2{c+C} = clusters{c} + ss;
+end
+
+onodes = bnet.observed;
+obs_nodes = [onodes(:) onodes(:)+ss];
+engine.sub_engine = jtree_inf_engine(bnet, 'clusters', clusters2);
+
+engine.clq_ass_to_cluster = zeros(C, 2);
+for c=1:C
+ engine.clq_ass_to_cluster(c,1) = clq_containing_nodes(engine.sub_engine, clusters{c});
+ engine.clq_ass_to_cluster(c,2) = clq_containing_nodes(engine.sub_engine, clusters{c}+ss);
+end
+engine.clusters = clusters;
+
+engine.clq_ass_to_node = zeros(ss, 2);
+for i=1:ss
+ engine.clq_ass_to_node(i, 1) = clq_containing_nodes(engine.sub_engine, i);
+ engine.clq_ass_to_node(i, 2) = clq_containing_nodes(engine.sub_engine, i+ss);
+end
+
+
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes, 1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+
+engine.sub_engine1 = jtree_inf_engine(bnet1, 'clusters', clusters);
+
+engine.clq_ass_to_cluster1 = zeros(1,C);
+for c=1:C
+ engine.clq_ass_to_cluster1(c) = clq_containing_nodes(engine.sub_engine1, clusters{c});
+end
+
+engine.clq_ass_to_node1 = zeros(1, ss);
+for i=1:ss
+ engine.clq_ass_to_node1(i) = clq_containing_nodes(engine.sub_engine1, i);
+end
+
+engine.clpot = []; % this is where we store the results between enter_evidence and marginal_nodes
+engine.filter = [];
+engine.maximize = [];
+engine.T = [];
+
+engine.bel = [];
+engine.bel_clpot = [];
+engine.slice1 = [];
+%engine.pot_type = 'cg';
+% hack for online inference so we can cope with hidden Gaussians and discrete
+% it will not affect the pot type used in enter_evidence
+engine.pot_type = determine_pot_type(bnet, onodes);
+
+engine = class(engine, 'bk_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_init_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_init_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function engine = dbn_init_bel(engine)
+% DBN_INIT_BEL Compute the initial belief state (bk)
+% engine = dbn_init_bel(engine)
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+evidence = cell(1,ss);
+engine = dbn_update_bel1(engine, evidence);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_marginal_from_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_marginal_from_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function marginal = dbn_marginal_from_bel(engine, i)
+% DBN_MARGINAL_FROM_BEL Compute the marginal on a node given the current belief state (bk)
+% marginal = dbn_marginal_from_bel(engine, i)
+
+if engine.slice1
+ j = i;
+ c = clq_containing_nodes(engine.sub_engine1, j);
+else
+ bnet = bnet_from_engine(engine);
+ ss = length(bnet.intra);
+ j = i+ss;
+ c = clq_containing_nodes(engine.sub_engine, j);
+end
+assert(c >= 1);
+bigpot = engine.bel_clpot{c};
+
+pot = marginalize_pot(bigpot, j);
+marginal = pot_to_marginal(pot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_update_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_update_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function [engine, loglik] = dbn_update_bel(engine, evidence)
+% DBN_UPDATE_BEL Update the belief state (bk)
+% [engine, loglik] = dbn_update_bel(engine, evidence)
+%
+% evidence{i,1} contains the evidence on node i in slice t-1
+% evidence{i,2} contains the evidence on node i in slice t
+
+oldbel = engine.bel;
+
+ss = size(evidence, 1);
+bnet = bnet_from_engine(engine);
+CPDpot = cell(1, ss);
+for n=1:ss
+ fam = family(bnet.dag, n, 2);
+ e = bnet.equiv_class(n, 2);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), evidence);
+end
+
+observed = ~isemptycell(evidence);
+onodes2 = find(observed(:));
+clqs = [engine.clq_ass_to_cluster(:,1); engine.clq_ass_to_node(:,2)];
+pots = [oldbel(:); CPDpot(:)];
+
+[clpot, loglik] = enter_soft_evidence(engine.sub_engine, clqs, pots, onodes2(:), engine.pot_type);
+
+C = length(engine.clusters);
+newbel = cell(1,C);
+for c=1:C
+ k = engine.clq_ass_to_cluster(c,2);
+ cl = engine.clusters{c};
+ newbel{c} = marginalize_pot(clpot{k}, cl+ss); % extract slice 2 posterior
+ newbel{c} = set_domain_pot(newbel{c}, cl); % shift back to slice 1 for re-use as prior
+end
+
+engine.bel = newbel;
+engine.bel_clpot = clpot;
+engine.slice1 = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_update_bel1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/dbn_update_bel1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [engine, loglik] = dbn_update_bel1(engine, evidence)
+% DBN_UPDATE_BEL1 Update the initial belief state (bk)
+% [engine, loglik] = dbn_update_bel1(engine, evidence)
+%
+% evidence{i} has the evidence on node i for slice 1
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+CPDpot = cell(1,ss);
+t = 1;
+for n=1:ss
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n, 1);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), evidence);
+end
+
+onodes = find(~isemptycell(evidence));
+
+[clpot, loglik] = enter_soft_evidence(engine.sub_engine1, engine.clq_ass_to_node1, CPDpot, onodes, engine.pot_type);
+
+C = length(engine.clusters);
+newbel = cell(1,C);
+for c=1:C
+ k = engine.clq_ass_to_cluster1(c);
+ newbel{c} = marginalize_pot(clpot{k}, engine.clusters{c});
+end
+
+engine.bel = newbel;
+engine.bel_clpot = clpot;
+engine.slice1 = 1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (bk)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+% filter - if 1, do filtering, else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+[ss T] = size(evidence);
+engine.filter = filter;
+engine.maximize = maximize;
+engine.T = T;
+
+if maximize
+ error('BK does not yet support max propagation')
+ % because it calls enter_soft_evidence, not enter_evidence
+end
+
+observed_bitv = ~isemptycell(evidence);
+onodes = find(observed_bitv);
+bnet = bnet_from_engine(engine);
+pot_type = determine_pot_type(bnet, onodes);
+CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type);
+[engine.clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed_bitv, pot_type, filter);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (bk)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+C = length(engine.clusters);
+Q = length(cliques_from_engine(engine.sub_engine));
+Q1 = length(cliques_from_engine(engine.sub_engine1));
+clpot = cell(Q,T);
+alpha = cell(C,T);
+
+% Forwards
+% The method is a generalization of the following HMM equation:
+% alpha(j,t) = normalise( (sum_i alpha(i,t-1) * transmat(i,j)) * obsmat(j,t) )
+% where alpha(j,t) = Pr(Q(t)=j | y(1:t))
+t = 1;
+[clpot(1:Q1,t), logscale(t)] = enter_soft_evidence(engine.sub_engine1, engine.clq_ass_to_node1(:), ...
+ CPDpot(:,1), find(observed(:,1)), pot_type);
+for c=1:C
+ k = engine.clq_ass_to_cluster1(c);
+ alpha{c,t} = marginalize_pot(clpot{k,t}, engine.clusters{c});
+end
+% For filtering, clpot{1} contains evidence on slice 1 only
+
+%fprintf('alphas t=%d\n', t);
+%for c=1:8
+% temp = pot_to_marginal(alpha{c,t});
+% temp.T
+%end
+
+% clpot{t} contains evidence from slices t-1, t for t > 1
+clqs = [engine.clq_ass_to_cluster(:,1); engine.clq_ass_to_node(:,2)];
+for t=2:T
+ pots = [alpha(:,t-1); CPDpot(:,t)];
+ [clpot(:,t), logscale(t)] = enter_soft_evidence(engine.sub_engine, clqs, pots, find(observed(:,t-1:t)), pot_type);
+ for c=1:C
+ k = engine.clq_ass_to_cluster(c,2);
+ cl = engine.clusters{c};
+ alpha{c,t} = marginalize_pot(clpot{k,t}, cl+ss); % extract slice 2 posterior
+ alpha{c,t} = set_domain_pot(alpha{c,t}, cl); % shift back to slice 1 for re-use as prior
+ end
+
+end
+
+loglik = sum(logscale);
+
+if filter
+ return;
+end
+
+% Backwards
+% The method is a generalization of the following HMM equation:
+% beta(i,t) = (sum_j transmat(i,j) * obsmat(j,t+1) * beta(j,t+1))
+% where beta(i,t) = Pr(y(t+1:T) | Q(t)=i)
+t = T;
+bnet = bnet_from_engine(engine);
+beta = cell(C,T);
+for c=1:C
+ beta{c,t} = mk_initial_pot(pot_type, engine.clusters{c} + ss, bnet.node_sizes(:), bnet.cnodes(:), ...
+ find(observed(:,t-1:t)));
+end
+for t=T-1:-1:1
+ clqs = [engine.clq_ass_to_cluster(:,2); engine.clq_ass_to_node(:,2)];
+ pots = [beta(:,t+1); CPDpot(:,t+1)];
+ temp = enter_soft_evidence(engine.sub_engine, clqs, pots, find(observed(:,t:t+1)), pot_type);
+ for c=1:C
+ k = engine.clq_ass_to_cluster(c,1);
+ cl = engine.clusters{c};
+ beta{c,t} = marginalize_pot(temp{k}, cl); % extract slice 1
+ beta{c,t} = set_domain_pot(beta{c,t}, cl + ss); % shift fwd to slice 2
+ end
+end
+
+% Combine
+% The method is a generalization of the following HMM equation:
+% xi(i,j,t) = normalise( alpha(i,t) * transmat(i,j) * obsmat(j,t+1) * beta(j,t+1) )
+% where xi(i,j,t) = Pr(Q(t)=i, Q(t+1)=j | y(1:T))
+for t=1:T-1
+ clqs = [engine.clq_ass_to_cluster(:); engine.clq_ass_to_node(:,2)];
+ pots = [alpha(:,t); beta(:,t+1); CPDpot(:,t+1)];
+ clpot(:,t+1) = enter_soft_evidence(engine.sub_engine, clqs, pots, find(observed(:,t:t+1)), pot_type);
+end
+% for smoothing, clpot{1} is undefined
+for k=1:Q1
+ clpot{k,1} = [];
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function m = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on the specified family (bk)
+% marginal = marginal_family(engine, i, t)
+
+% This is just like inf_engine/marginal_family, except when we call
+% marginal_nodes, we provide a 4th argument, to tell it's a family.
+
+if nargin < 3, t = 1; end
+
+bnet = bnet_from_engine(engine);
+if t==1
+ m = marginal_nodes(engine, family(bnet.dag, i), t, 1);
+else
+ ss = length(bnet.intra);
+ fam = family(bnet.dag, i+ss);
+ if any(fam<=ss)
+    % i has a parent in the preceding slice
+ % Hence the lowest numbered slice containing the family is t-1
+ m = marginal_nodes(engine, fam, t-1, 1);
+ else
+ % The family all fits inside slice t
+ % Hence shift the indexes back to slice 1
+ m = marginal_nodes(engine, fam-ss, t, 1);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function marginal = marginal_nodes(engine, nodes, t, fam)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (bk)
+%
+% marginal = marginal_nodes(engine, i, t)
+% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
+% If enter_evidence used filtering instead of smoothing, this will return Pr(X(i,t) | Y(1:t)).
+%
+% marginal = marginal_nodes(engine, query, t)
+% returns Pr(X(query(1),t), ... X(query(end),t) | Y(1:T)),
+% where X(q,t) is the q'th node in the t'th slice. If q > ss (slice size), this is equal
+% to X(q mod ss, t+1). That is, 't' specifies the time slice of the earliest node.
+% 'query' cannot span more than 2 time slices.
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3.
+
+if nargin < 3, t = 1; end
+if nargin < 4, fam = 0; else fam = 1; end
+
+
+% clpot{t} contains slice t-1 and t
+% Example
+% clpot #: 1 2 3
+% slices: 1 1,2 2,3
+% For filtering, we must take care not to take future evidence into account.
+% For smoothing, clpot{1} does not exist.
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+
+nodes2 = nodes;
+if ~engine.filter
+ if t < engine.T
+ slice = t+1;
+ else % earliest t is T, so all nodes fit in one slice
+ slice = engine.T;
+ nodes2 = nodes + ss;
+ end
+else
+ if t == 1
+ slice = 1;
+ else
+ if all(nodes<=ss)
+ slice = t;
+ nodes2 = nodes + ss;
+ elseif t == engine.T
+ slice = t;
+ else
+ slice = t + 1;
+ end
+ end
+end
+
+if engine.filter & t==1
+ c = clq_containing_nodes(engine.sub_engine1, nodes2, fam);
+else
+ c = clq_containing_nodes(engine.sub_engine, nodes2, fam);
+end
+assert(c >= 1);
+bigpot = engine.clpot{c, slice};
+
+pot = marginalize_pot(bigpot, nodes2);
+marginal = pot_to_marginal(pot);
+
+% we convert the domain to the unrolled numbering system
+% so that update_ess extracts the right evidence.
+marginal.domain = nodes+(t-1)*ss;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@bk_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (bk)
+% engine = update_engine(engine, newCPDs)
+
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+engine.sub_engine = update_engine(engine.sub_engine, newCPDs);
+
+bnet = bnet_from_engine(engine);
+eclass1 = bnet.equiv_class(:,1);
+engine.sub_engine1 = update_engine(engine.sub_engine1, newCPDs(1:max(eclass1)));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+/cbk_inf_engine.m/1.1.1.1/Mon Nov 22 22:15:34 2004//
+/dbn_init_bel.m/1.1.1.1/Tue Jul 29 02:44:58 2003//
+/dbn_marginal_from_bel.m/1.1.1.1/Tue Jul 29 02:44:58 2003//
+/dbn_update_bel.m/1.1.1.1/Tue Jul 29 02:44:58 2003//
+/dbn_update_bel1.m/1.1.1.1/Tue Jul 29 02:44:58 2003//
+/enter_evidence.m/1.1.1.1/Mon Jan 12 20:53:54 2004//
+/enter_soft_evidence.m/1.1.1.1/Wed Feb 4 07:42:38 2004//
+/junk/1.1.1.1/Wed Nov 24 20:12:38 2004//
+/marginal_family.m/1.1.1.1/Tue Jul 29 02:44:58 2003//
+/marginal_nodes.m/1.1.1.1/Tue Dec 16 06:17:18 2003//
+/update_engine.m/1.1.1.1/Tue Jul 29 02:44:58 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@cbk_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/cbk_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/cbk_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,175 @@
+function engine = cbk_inf_engine(bnet, varargin)
+% Just the same as bk_inf_engine, but you can specify overlapping clusters.
+
+ss = length(bnet.intra);
+% set default params
+clusters = 'exact';
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+ end
+end
+
+if strcmp(clusters, 'exact')
+ %clusters = { compute_interface_nodes(bnet.intra, bnet.inter) };
+ clusters = { 1:ss };
+elseif strcmp(clusters, 'ff')
+ clusters = num2cell(1:ss);
+end
+
+
+% We need to insert the prior on the clusters in slice 1,
+% and extract the posterior on the clusters in slice 2.
+% We don't need to care about the separators, b/c they're subsets of the clusters.
+C = length(clusters);
+clusters2 = cell(1,2*C);
+clusters2(1:C) = clusters;
+for c=1:C
+ clusters2{c+C} = clusters{c} + ss;
+end
+
+onodes = bnet.observed;
+obs_nodes = [onodes(:) onodes(:)+ss];
+engine.sub_engine = jtree_inf_engine(bnet, 'clusters', clusters2);
+
+%FH >>>
+%Compute separators.
+ns = bnet.node_sizes(:,1);
+ns(onodes) = 1;
+[clusters, separators] = build_jt(clusters, 1:length(ns), ns);
+S = length(separators);
+engine.separators = separators;
+
+%Compute size of clusters.
+cl_sizes = zeros(1,C);
+for c=1:C
+ cl_sizes(c) = prod(ns(clusters{c}));
+end
+
+%Assign separators to the smallest cluster subsuming them.
+engine.cluster_ass_to_separator = zeros(S, 1);
+for s=1:S
+ subsuming_clusters = [];
+ %find smallest cluster containing s
+ for c=1:C
+ if mysubset(separators{s}, clusters{c})
+ subsuming_clusters(end+1) = c;
+ end
+ end
+ c = argmin(cl_sizes(subsuming_clusters));
+ engine.cluster_ass_to_separator(s) = subsuming_clusters(c);
+end
+
+%<<< FH
+
+engine.clq_ass_to_cluster = zeros(C, 2);
+for c=1:C
+ engine.clq_ass_to_cluster(c,1) = clq_containing_nodes(engine.sub_engine, clusters{c});
+ engine.clq_ass_to_cluster(c,2) = clq_containing_nodes(engine.sub_engine, clusters{c}+ss);
+end
+engine.clusters = clusters;
+
+engine.clq_ass_to_node = zeros(ss, 2);
+for i=1:ss
+ engine.clq_ass_to_node(i, 1) = clq_containing_nodes(engine.sub_engine, i);
+ engine.clq_ass_to_node(i, 2) = clq_containing_nodes(engine.sub_engine, i+ss);
+end
+
+
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes, 1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+
+engine.sub_engine1 = jtree_inf_engine(bnet1, 'clusters', clusters);
+
+engine.clq_ass_to_cluster1 = zeros(1,C);
+for c=1:C
+ engine.clq_ass_to_cluster1(c) = clq_containing_nodes(engine.sub_engine1, clusters{c});
+end
+
+engine.clq_ass_to_node1 = zeros(1, ss);
+for i=1:ss
+ engine.clq_ass_to_node1(i) = clq_containing_nodes(engine.sub_engine1, i);
+end
+
+engine.clpot = []; % this is where we store the results between enter_evidence and marginal_nodes
+engine.filter = [];
+engine.maximize = [];
+engine.T = [];
+
+engine.bel = [];
+engine.bel_clpot = [];
+engine.slice1 = [];
+%engine.pot_type = 'cg';
+% hack for online inference so we can cope with hidden Gaussians and discrete
+% it will not affect the pot type used in enter_evidence
+engine.pot_type = determine_pot_type(bnet, onodes);
+
+engine = class(engine, 'cbk_inf_engine', inf_engine(bnet));
+
+
+
+
+function [cliques, seps, jt_size] = build_jt(cliques, vars, ns)
+% BUILD_JT connects the cliques into a jtree, computes the respective
+% separators and the size of the resulting jtree.
+%
+% [cliques, seps, jt_size] = build_jt(cliques, vars, ns)
+% ns(i) has to hold the size of vars(i)
+% vars has to be a superset of the union of cliques.
+
+%======== Compute the jtree with tool from BNT. This wants the vars to be 1:N.
+%==== Map from nodes to their indices.
+%disp('Computing jtree for cliques with vars and ns:');
+%cliques
+%vars
+%ns'
+
+inv_nodes = sparse(1,max(vars));
+N = length(vars);
+for i=1:N
+ inv_nodes(vars(i)) = i;
+end
+
+tmp_cliques = cell(1,length(cliques));
+%==== Temporarily map clique vars to their indices.
+for i=1:length(cliques)
+ tmp_cliques{i} = inv_nodes(cliques{i});
+end
+
+%=== Compute the jtree, using BNT.
+[jtree, root, B, w] = cliques_to_jtree(tmp_cliques, ns);
+
+
+%======== Now, compute the separators between connected cliques and their weights.
+seps = {};
+s_w = [];
+[is,js] = find(jtree > 0);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ sep = vars(find(B(i,:) & B(j,:))); % intersect(cliques{i}, cliques{j});
+ if i>j | length(sep) == 0, continue; end;
+ seps{end+1} = sep;
+ s_w(end+1) = prod(ns(inv_nodes(seps{end})));
+end
+
+cl_w = sum(w);
+sep_w = sum(s_w);
+assert(cl_w > sep_w, 'Weight of cliques must be bigger than weight of separators');
+
+jt_size = cl_w + sep_w;
+% jt.cliques = cliques;
+% jt.seps = seps;
+% jt.size = jt_size;
+% jt.ns = ns';
+% jt;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_init_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_init_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function engine = dbn_init_bel(engine)
+% DBN_INIT_BEL Compute the initial belief state (bk)
+% engine = dbn_init_bel(engine)
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+evidence = cell(1,ss);
+engine = dbn_update_bel1(engine, evidence);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_marginal_from_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_marginal_from_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function marginal = dbn_marginal_from_bel(engine, i)
+% DBN_MARGINAL_FROM_BEL Compute the marginal on a node given the current belief state (bk)
+% marginal = dbn_marginal_from_bel(engine, i)
+
+if engine.slice1
+ j = i;
+ c = clq_containing_nodes(engine.sub_engine1, j);
+else
+ bnet = bnet_from_engine(engine);
+ ss = length(bnet.intra);
+ j = i+ss;
+ c = clq_containing_nodes(engine.sub_engine, j);
+end
+assert(c >= 1);
+bigpot = engine.bel_clpot{c};
+
+pot = marginalize_pot(bigpot, j);
+marginal = pot_to_marginal(pot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_update_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_update_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function [engine, loglik] = dbn_update_bel(engine, evidence)
+% DBN_UPDATE_BEL Update the belief state (bk)
+% [engine, loglik] = dbn_update_bel(engine, evidence)
+%
+% evidence{i,1} contains the evidence on node i in slice t-1
+% evidence{i,2} contains the evidence on node i in slice t
+
+oldbel = engine.bel;
+
+ss = size(evidence, 1);
+bnet = bnet_from_engine(engine);
+CPDpot = cell(1, ss);
+for n=1:ss
+ fam = family(bnet.dag, n, 2);
+ e = bnet.equiv_class(n, 2);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), evidence);
+end
+
+observed = ~isemptycell(evidence);
+onodes2 = find(observed(:));
+clqs = [engine.clq_ass_to_cluster(:,1); engine.clq_ass_to_node(:,2)];
+pots = [oldbel(:); CPDpot(:)];
+
+[clpot, loglik] = enter_soft_evidence(engine.sub_engine, clqs, pots, onodes2(:), engine.pot_type);
+
+C = length(engine.clusters);
+newbel = cell(1,C);
+for c=1:C
+ k = engine.clq_ass_to_cluster(c,2);
+ cl = engine.clusters{c};
+ newbel{c} = marginalize_pot(clpot{k}, cl+ss); % extract slice 2 posterior
+ newbel{c} = set_domain_pot(newbel{c}, cl); % shift back to slice 1 for re-use as prior
+end
+
+engine.bel = newbel;
+engine.bel_clpot = clpot;
+engine.slice1 = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_update_bel1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/dbn_update_bel1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [engine, loglik] = dbn_update_bel1(engine, evidence)
+% DBN_UPDATE_BEL1 Update the initial belief state (bk)
+% [engine, loglik] = dbn_update_bel1(engine, evidence)
+%
+% evidence{i} has the evidence on node i for slice 1
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+CPDpot = cell(1,ss);
+t = 1;
+for n=1:ss
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n, 1);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), evidence);
+end
+
+onodes = find(~isemptycell(evidence));
+
+[clpot, loglik] = enter_soft_evidence(engine.sub_engine1, engine.clq_ass_to_node1, CPDpot, onodes, engine.pot_type);
+
+C = length(engine.clusters);
+newbel = cell(1,C);
+for c=1:C
+ k = engine.clq_ass_to_cluster1(c);
+ newbel{c} = marginalize_pot(clpot{k}, engine.clusters{c});
+end
+
+engine.bel = newbel;
+engine.bel_clpot = clpot;
+engine.slice1 = 1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% this is unchanged from bk_inf_engine.
+% ENTER_EVIDENCE Add the specified evidence to the network (bk)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+% filter - if 1, do filtering, else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+[ss T] = size(evidence);
+engine.filter = filter;
+engine.maximize = maximize;
+engine.T = T;
+
+if maximize
+ error('BK does not yet support max propagation')
+ % because it calls enter_soft_evidence, not enter_evidence
+end
+
+observed_bitv = ~isemptycell(evidence);
+onodes = find(observed_bitv);
+bnet = bnet_from_engine(engine);
+pot_type = determine_pot_type(bnet, onodes);
+CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type);
+[engine.clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed_bitv, pot_type, filter);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,115 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (bk)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+C = length(engine.clusters);
+S = length(engine.separators);
+Q = length(cliques_from_engine(engine.sub_engine));
+Q1 = length(cliques_from_engine(engine.sub_engine1));
+clpot = cell(Q,T);
+alpha = cell(C,T);
+
+% Forwards
+% The method is a generalization of the following HMM equation:
+% alpha(j,t) = normalise( (sum_i alpha(i,t-1) * transmat(i,j)) * obsmat(j,t) )
+% where alpha(j,t) = Pr(Q(t)=j | y(1:t))
+t = 1;
+[clpot(1:Q1,t), logscale(t)] = enter_soft_evidence(engine.sub_engine1, engine.clq_ass_to_node1(:), ...
+ CPDpot(:,1), find(observed(:,1)), pot_type);
+for c=1:C
+ k = engine.clq_ass_to_cluster1(c);
+ alpha{c,t} = marginalize_pot(clpot{k,t}, engine.clusters{c});
+end
+
+%=== FH: For each separator s, divide some cluster potential by s's potential
+alpha_orig = alpha(:,t);
+for s=1:S
+ c = engine.cluster_ass_to_separator(s);
+ alpha{c,t} = divide_by_pot(alpha{c,t}, marginalize_pot(alpha_orig{c}, engine.separators{s}));
+end
+
+% For filtering, clpot{1} contains evidence on slice 1 only
+
+%fprintf('alphas t=%d\n', t);
+%for c=1:8
+% temp = pot_to_marginal(alpha{c,t});
+% temp.T
+%end
+
+% clpot{t} contains evidence from slices t-1, t for t > 1
+clqs = [engine.clq_ass_to_cluster(:,1); engine.clq_ass_to_node(:,2)];
+for t=2:T
+ pots = [alpha(:,t-1); CPDpot(:,t)];
+ [clpot(:,t), logscale(t)] = enter_soft_evidence(engine.sub_engine, clqs, pots, find(observed(:,t-1:t)), pot_type);
+ for c=1:C
+ k = engine.clq_ass_to_cluster(c,2);
+ cl = engine.clusters{c};
+ alpha{c,t} = marginalize_pot(clpot{k,t}, cl+ss); % extract slice 2 posterior
+ alpha{c,t} = set_domain_pot(alpha{c,t}, cl); % shift back to slice 1 for re-use as prior
+ end
+ %=== FH: For each separator s, divide some cluster potential by s's potential
+ alpha_orig = alpha(:,t);
+ for s=1:S
+ c = engine.cluster_ass_to_separator(s);
+ alpha{c,t} = divide_by_pot(alpha{c,t}, marginalize_pot(alpha_orig{c}, engine.separators{s}));
+ end
+end
+
+loglik = sum(logscale);
+
+if filter
+ return;
+end
+
+% Backwards
+% The method is a generalization of the following HMM equation:
+% beta(i,t) = (sum_j transmat(i,j) * obsmat(j,t+1) * beta(j,t+1))
+% where beta(i,t) = Pr(y(t+1:T) | Q(t)=i)
+t = T;
+bnet = bnet_from_engine(engine);
+beta = cell(C,T);
+for c=1:C
+ beta{c,t} = mk_initial_pot(pot_type, engine.clusters{c} + ss, bnet.node_sizes(:), bnet.cnodes(:), ...
+ find(observed(:,t-1:t)));
+end
+%=== FH: For each separator s, divide some cluster potential by s's potential
+beta_orig = beta(:,t);
+for s=1:S
+ c = engine.cluster_ass_to_separator(s);
+ beta{c,t} = divide_by_pot(beta{c,t}, marginalize_pot(beta_orig{c}, engine.separators{s}+ss));
+end
+
+for t=T-1:-1:1
+ clqs = [engine.clq_ass_to_cluster(:,2); engine.clq_ass_to_node(:,2)];
+ pots = [beta(:,t+1); CPDpot(:,t+1)];
+ temp = enter_soft_evidence(engine.sub_engine, clqs, pots, find(observed(:,t:t+1)), pot_type);
+ for c=1:C
+ k = engine.clq_ass_to_cluster(c,1);
+ cl = engine.clusters{c};
+ beta{c,t} = marginalize_pot(temp{k}, cl); % extract slice 1
+ beta{c,t} = set_domain_pot(beta{c,t}, cl + ss); % shift fwd to slice 2
+ end
+ %=== FH: For each separator s, divide some cluster potential by s's potential
+ beta_orig = beta(:,t);
+ for s=1:S
+ c = engine.cluster_ass_to_separator(s);
+ beta{c,t} = divide_by_pot(beta{c,t}, marginalize_pot(beta_orig{c}, engine.separators{s}+ss));
+ end
+end
+
+% Combine
+% The method is a generalization of the following HMM equation:
+% xi(i,j,t) = normalise( alpha(i,t) * transmat(i,j) * obsmat(j,t+1) * beta(j,t+1) )
+% where xi(i,j,t) = Pr(Q(t)=i, Q(t+1)=j | y(1:T))
+for t=1:T-1
+ clqs = [engine.clq_ass_to_cluster(:); engine.clq_ass_to_node(:,2)];
+ pots = [alpha(:,t); beta(:,t+1); CPDpot(:,t+1)];
+ clpot(:,t+1) = enter_soft_evidence(engine.sub_engine, clqs, pots, find(observed(:,t:t+1)), pot_type);
+end
+% for smoothing, clpot{1} is undefined
+for k=1:Q1
+ clpot{k,1} = [];
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/junk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/junk Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,176 @@
+function engine = cbk_inf_engine(bnet, varargin)
+% Just the same as bk_inf_engine, but you can specify overlapping clusters.
+
+ss = length(bnet.intra);
+% set default params
+clusters = 'exact';
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+ end
+end
+
+if strcmp(clusters, 'exact')
+ %clusters = { compute_interface_nodes(bnet.intra, bnet.inter) };
+ clusters = { 1:ss };
+elseif strcmp(clusters, 'ff')
+ clusters = num2cell(1:ss);
+end
+
+
+% We need to insert the prior on the clusters in slice 1,
+% and extract the posterior on the clusters in slice 2.
+% We don't need to care about the separators, b/c they're subsets of the clusters.
+C = length(clusters);
+clusters2 = cell(1,2*C);
+clusters2(1:C) = clusters;
+for c=1:C
+ clusters2{c+C} = clusters{c} + ss;
+end
+
+onodes = bnet.observed;
+obs_nodes = [onodes(:) onodes(:)+ss];
+engine.sub_engine = jtree_inf_engine(bnet, 'clusters', clusters2);
+
+%FH >>>
+%Compute separators.
+ns = bnet.node_sizes(:,1);
+ns(onodes) = 1;
+[clusters, separators] = build_jt(clusters, 1:length(ns), ns);
+S = length(separators);
+engine.separators = separators;
+
+%Compute size of clusters.
+cl_sizes = zeros(1,C);
+for c=1:C
+ cl_sizes(c) = prod(ns(clusters{c}));
+end
+
+%Assign separators to the smallest cluster subsuming them.
+engine.cluster_ass_to_separator = zeros(S, 1);
+for s=1:S
+ subsuming_clusters = [];
+ %find smaunk
+
+ for c=1:C
+ if mysubset(separators{s}, clusters{c})
+ subsuming_clusters(end+1) = c;
+ end
+ end
+ c = argmin(cl_sizes(subsuming_clusters));
+ engine.cluster_ass_to_separator(s) = subsuming_clusters(c);
+end
+
+%<<< FH
+
+engine.clq_ass_to_cluster = zeros(C, 2);
+for c=1:C
+ engine.clq_ass_to_cluster(c,1) = clq_containing_nodes(engine.sub_engine, clusters{c});
+ engine.clq_ass_to_cluster(c,2) = clq_containing_nodes(engine.sub_engine, clusters{c}+ss);
+end
+engine.clusters = clusters;
+
+engine.clq_ass_to_node = zeros(ss, 2);
+for i=1:ss
+ engine.clq_ass_to_node(i, 1) = clq_containing_nodes(engine.sub_engine, i);
+ engine.clq_ass_to_node(i, 2) = clq_containing_nodes(engine.sub_engine, i+ss);
+end
+
+
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes, 1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+
+engine.sub_engine1 = jtree_inf_engine(bnet1, 'clusters', clusters);
+
+engine.clq_ass_to_cluster1 = zeros(1,C);
+for c=1:C
+ engine.clq_ass_to_cluster1(c) = clq_containing_nodes(engine.sub_engine1, clusters{c});
+end
+
+engine.clq_ass_to_node1 = zeros(1, ss);
+for i=1:ss
+ engine.clq_ass_to_node1(i) = clq_containing_nodes(engine.sub_engine1, i);
+end
+
+engine.clpot = []; % this is where we store the results between enter_evidence and marginal_nodes
+engine.filter = [];
+engine.maximize = [];
+engine.T = [];
+
+engine.bel = [];
+engine.bel_clpot = [];
+engine.slice1 = [];
+%engine.pot_type = 'cg';
+% hack for online inference so we can cope with hidden Gaussians and discrete
+% it will not affect the pot type used in enter_evidence
+engine.pot_type = determine_pot_type(bnet, onodes);
+
+engine = class(engine, 'cbk_inf_engine', inf_engine(bnet));
+
+
+
+
+function [cliques, seps, jt_size] = build_jt(cliques, vars, ns)
+% BUILD_JT connects the cliques into a jtree, computes the respective
+% separators and the size of the resulting jtree.
+%
+% [cliques, seps, jt_size] = build_jt(cliques, vars, ns)
+% ns(i) has to hold the size of vars(i)
+% vars has to be a superset of the union of cliques.
+
+%======== Compute the jtree with tool from BNT. This wants the vars to be 1:N.
+%==== Map from nodes to their indices.
+%disp('Computing jtree for cliques with vars and ns:');
+%cliques
+%vars
+%ns'
+
+inv_nodes = sparse(1,max(vars));
+N = length(vars);
+for i=1:N
+ inv_nodes(vars(i)) = i;
+end
+
+tmp_cliques = cell(1,length(cliques));
+%==== Temporarily map clique vars to their indices.
+for i=1:length(cliques)
+ tmp_cliques{i} = inv_nodes(cliques{i});
+end
+
+%=== Compute the jtree, using BNT.
+[jtree, root, B, w] = cliques_to_jtree(tmp_cliques, ns);
+
+
+%======== Now, compute the separators between connected cliques and their weights.
+seps = {};
+s_w = [];
+[is,js] = find(jtree > 0);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ sep = vars(find(B(i,:) & B(j,:))); % intersect(cliques{i}, cliques{j});
+ if i>j | length(sep) == 0, continue; end;
+ seps{end+1} = sep;
+ s_w(end+1) = prod(ns(inv_nodes(seps{end})));
+end
+
+cl_w = sum(w);
+sep_w = sum(s_w);
+assert(cl_w > sep_w, 'Weight of cliques must be bigger than weight of separators');
+
+jt_size = cl_w + sep_w;
+% jt.cliques = cliques;
+% jt.seps = seps;
+% jt.size = jt_size;
+% jt.ns = ns';
+% jt;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function m = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on the specified family (bk)
+% marginal = marginal_family(engine, i, t)
+
+% This is just like inf_engine/marginal_family, except when we call
+% marginal_nodes, we provide a 4th argument, to tell it's a family.
+
+if nargin < 3, t = 1; end
+
+bnet = bnet_from_engine(engine);
+if t==1
+ m = marginal_nodes(engine, family(bnet.dag, i), t, 1);
+else
+ ss = length(bnet.intra);
+ fam = family(bnet.dag, i+ss);
+ if any(fam<=ss)
+ % i has a parent in the preceeding slice
+ % Hence the lowest numbered slice containing the family is t-1
+ m = marginal_nodes(engine, fam, t-1, 1);
+ else
+ % The family all fits inside slice t
+ % Hence shift the indexes back to slice 1
+ m = marginal_nodes(engine, fam-ss, t, 1);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function marginal = marginal_nodes(engine, nodes, t, fam)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (bk)
+%
+% marginal = marginal_nodes(engine, i, t)
+% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
+% If enter_evidence used filtering instead of smoothing, this will return Pr(X(i,t) | Y(1:t)).
+%
+% marginal = marginal_nodes(engine, query, t)
+% returns Pr(X(query(1),t), ... X(query(end),t) | Y(1:T)),
+% where X(q,t) is the q'th node in the t'th slice. If q > ss (slice size), this is equal
+% to X(q mod ss, t+1). That is, 't' specifies the time slice of the earliest node.
+% 'query' cannot span more than 2 time slices.
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3.
+
+if nargin < 3, t = 1; end
+if nargin < 4, fam = 0; else fam = 1; end
+
+
+% clpot{t} contains slice t-1 and t
+% Example
+% clpot #: 1 2 3
+% slices: 1 1,2 2,3
+% For filtering, we must take care not to take future evidence into account.
+% For smoothing, clpot{1} does not exist.
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+
+nodes2 = nodes;
+if ~engine.filter
+ if t < engine.T
+ slice = t+1;
+ else % earliest t is T, so all nodes fit in one slice
+ slice = engine.T;
+ nodes2 = nodes + ss;
+ end
+else
+ if t == 1
+ slice = 1;
+ else
+ if all(nodes<=ss)
+ slice = t;
+ nodes2 = nodes + ss;
+ elseif t == engine.T
+ slice = t;
+ else
+ slice = t + 1;
+ end
+ end
+end
+
+if engine.filter & t==1
+ c = clq_containing_nodes(engine.sub_engine1, nodes2, fam);
+else
+ c = clq_containing_nodes(engine.sub_engine, nodes2, fam);
+end
+assert(c >= 1);
+bigpot = engine.clpot{c, slice};
+
+pot = marginalize_pot(bigpot, nodes2);
+marginal = pot_to_marginal(pot);
+
+% we convert the domain to the unrolled numbering system
+% so that update_ess extracts the right evidence.
+marginal.domain = nodes+(t-1)*ss;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@cbk_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (bk)
+% engine = update_engine(engine, newCPDs)
+
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+engine.sub_engine = update_engine(engine.sub_engine, newCPDs);
+
+bnet = bnet_from_engine(engine);
+eclass1 = bnet.equiv_class(:,1);
+engine.sub_engine1 = update_engine(engine.sub_engine1, newCPDs(1:max(eclass1)));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/ff_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/filter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/smooth_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@ff_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/enter_soft_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@ff_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [marginals, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (bk_ff)
+% [marginals, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+assert(pot_type == 'd');
+[ss T] = size(CPDpot);
+fwd = cell(ss,T);
+hnodes = engine.hnodes(:)';
+onodes = engine.onodes(:)';
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+onodes2 = [onodes onodes+ss];
+ns(onodes2) = 1;
+
+logscale = zeros(1,T);
+local_logscale = zeros(1,length(hnodes));
+
+t = 1;
+for i=hnodes
+ fwd{i,t} = CPDpot{i,t};
+end
+for i=onodes
+ p = parents(bnet.dag, i);
+ assert(length(p)==1);
+ ev = marginalize_pot(CPDpot{i,t}, p);
+ fwd{p,t} = multiply_by_pot(fwd{p,t}, ev);
+end
+for i=hnodes
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+end
+logscale(t) = sum(local_logscale);
+
+for t=2:T
+ for i=hnodes
+ ps = parents(bnet.dag, i+ss);
+ assert(all(ps<=ss)); % in previous slice
+ prior = CPDpot{i,t};
+ for p=ps(:)'
+ prior = multiply_by_pot(prior, fwd{p,t-1});
+ end
+ fwd{i,t} = marginalize_pot(prior, i+ss);
+ fwd{i,t} = set_domain_pot(fwd{i,t}, i);
+ end
+ for i=onodes
+ p = parents(bnet.dag, i);
+ assert(length(p)==1);
+ temp = pot_to_marginal(CPDpot{i,t});
+ ev = dpot(p, ns(p), temp.T);
+ fwd{p,t} = multiply_by_pot(fwd{p,t}, ev);
+ end
+
+ for i=hnodes
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+ end
+ logscale(t) = sum(local_logscale);
+end
+
+marginals = fwd;
+loglik = sum(logscale);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,94 @@
+function [marginals, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (ff)
+% [marginals, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+assert(pot_type == 'd');
+[ss T] = size(CPDpot);
+fwd = cell(ss,T);
+hnodes = engine.hnodes(:)';
+onodes = engine.onodes(:)';
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+onodes2 = [onodes onodes+ss];
+ns(onodes2) = 1;
+
+logscale = zeros(1,T);
+H = length(hnodes);
+local_logscale = zeros(1,ss);
+
+obschild = zeros(1,ss);
+for i=hnodes
+ ocs = myintersect(children(bnet.dag, i), onodes);
+ assert(length(ocs)==1);
+ obschild(i) = ocs(1);
+end
+
+t = 1;
+for i=hnodes
+ fwd{i,t} = CPDpot{i,t};
+ c = obschild(i);
+ temp = pot_to_marginal(CPDpot{c,t});
+ ev = dpot(i, ns(i), temp.T);
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, ev);
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+end
+logscale(t) = sum(local_logscale);
+
+for t=2:T
+ for i=hnodes
+ ps = parents(bnet.dag, i+ss);
+ assert(all(ps<=ss)); % in previous slice
+ prior = CPDpot{i,t};
+ for p=ps(:)'
+ prior = multiply_by_pot(prior, fwd{p,t-1});
+ end
+ fwd{i,t} = marginalize_pot(prior, i+ss);
+ fwd{i,t} = set_domain_pot(fwd{i,t}, i);
+ c = obschild(i);
+ temp = pot_to_marginal(CPDpot{c,t});
+ ev = dpot(i, ns(i), temp.T);
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, ev);
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+ end
+ logscale(t) = sum(local_logscale);
+end
+
+loglik = sum(logscale);
+
+
+if filter
+ marginals = fwd;
+ return;
+end
+
+back = cell(ss,T);
+t = T;
+for i=hnodes
+ back{i,t} = dpot(i, ns(i));
+ back{i,t} = set_domain_pot(back{i,t}, i+ss);
+end
+for t=T-1:-1:1
+ for i=hnodes
+ pot = CPDpot{i,t+1};
+ pot = multiply_by_pot(pot, back{i,t+1});
+ c = obschild(i);
+ temp = pot_to_marginal(CPDpot{c,t+1});
+ ev = dpot(i, ns(i), temp.T);
+ pot = multiply_by_pot(pot, ev);
+ back{i,t} = marginalize_pot(pot, i);
+ back{i,t} = normalize_pot(back{i,t});
+ back{i,t} = set_domain_pot(back{i,t}, i+ss);
+ end
+end
+
+
+
+% COMBINE
+for t=1:T
+ for i=hnodes
+ back{i,t} = set_domain_pot(back{i,t}, i);
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, back{i,t});
+ marginals{i,t} = normalize_pot(fwd{i,t});
+ %fwdback{i,t} = normalize_pot(multiply_pots(fwd{i,t}, back{i,t}));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/Old/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function marginal = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on the specified family (ff)
+% marginal = marginal_family(engine, i, t)
+
+if nargin < 3, t = 1; end
+
+% The method is similar to the following HMM equation:
+% xi(i,j,t) = normalise( alpha(i,t) * transmat(i,j) * obsmat(j,t+1) * beta(j,t+1) )
+% where xi(i,j,t) = Pr(Q(t)=i, Q(t+1)=j | y(1:T))
+
+bnet = bnet_from_engine(engine);
+
+if myismember(i, engine.onodes)
+ ps = parents(bnet.dag, i);
+ p = ps(1);
+ marginal = pot_to_marginal(engine.marginals{p,t});
+ marginal.domain = [p i];
+ return;
+end
+
+if t==1
+ marginal = pot_to_marginal(engine.marginals{i,t});
+ return;
+end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+pot = engine.CPDpot{i,t};
+c = engine.obschild(i);
+pot = multiply_by_pot(pot, engine.CPDpot{c,t});
+pot = multiply_by_pot(pot, engine.back{i,t});
+ps = parents(bnet.dag, i+ss);
+for p=ps(:)'
+ pot = multiply_by_pot(pot, engine.fwd{p,t-1});
+end
+marginal = pot_to_marginal(normalize_pot(pot));
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (ff)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or
+% column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (not yet supported), else sum-product [0]
+% filter - if 1, do filtering, else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+assert(~maximize);
+
+
+[ss T] = size(evidence);
+observed = ~isemptycell(evidence);
+bnet = bnet_from_engine(engine);
+%pot_type = determine_pot_type(find(observed(:,1)), bnet.cnodes_slice, bnet.intra);
+pot_type = determine_pot_type(bnet, observed);
+% we assume we can use the same pot_type in all slices
+
+CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type);
+
+% Now convert CPDs on observed nodes to be potentials just on their parents
+assert(pot_type == 'd');
+onodes = bnet.observed(:);
+ns = bnet.node_sizes_slice;
+ns(onodes) = 1;
+for t=1:T
+ for i=onodes
+ p = parents(bnet.dag, i);
+ %CPDpot{i,t} = set_domain_pot(CPDpot{i,t}, p); % leaves size too long
+ temp = pot_to_marginal(CPDpot{i,t});
+ CPDpot{i,t} = dpot(p, ns(p), temp.T); % assumes pot_type = d
+ end
+end
+
+[engine.marginals, engine.fwd, engine.back, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter);
+
+engine.CPDpot = CPDpot;
+engine.filter = filter;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function [marginals, fwd, back, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (ff)
+% [marginals, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+if filter
+ [fwd, loglik] = filter_evidence(engine, CPDpot, observed, pot_type);
+ marginals = fwd;
+ back = [];
+else
+ [marginals, fwd, back, loglik] = smooth_evidence(engine, CPDpot, observed, pot_type);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/ff_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/ff_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function engine = ff_inf_engine(bnet)
+% FF_INF_ENGINE Factored frontier inference engine for DBNs
+% engine = ff_inf_engine(bnet)
+%
+% The model must be topologically isomorphic to an HMM.
+% In addition, each hidden node is assumed to have at most one observed child,
+% and each observed child is assumed to have exactly one hidden parent.
+%
+% For details of this algorithm, see
+% "The Factored Frontier Algorithm for Approximate Inference in DBNs",
+% Kevin Murphy and Yair Weiss, UAI 2001.
+%
+% THIS IS HIGHLY EXPERIMENTAL CODE!
+
+ss = length(bnet.intra);
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+
+[persistent_nodes, transient_nodes] = partition_dbn_nodes(bnet.intra, bnet.inter);
+assert(isequal(onodes, transient_nodes));
+assert(isequal(hnodes, persistent_nodes));
+
+engine.onodes = onodes;
+engine.hnodes = hnodes;
+engine.marginals = [];
+engine.fwd = [];
+engine.back = [];
+engine.CPDpot = [];
+engine.filter = [];
+
+obschild = zeros(1,ss);
+for i=engine.hnodes(:)'
+ %ocs = myintersect(children(bnet.dag, i), onodes);
+ ocs = children(bnet.intra, i);
+ assert(length(ocs) <= 1);
+ if length(ocs)==1
+ obschild(i) = ocs(1);
+ end
+end
+engine.obschild = obschild;
+
+
+engine = class(engine, 'ff_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/filter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/filter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function [fwd, loglik] = filter_evidence(engine, CPDpot, observed, pot_type)
+% [fwd, loglik] = filter_evidence(engine, CPDpot, observed, pot_type) (ff)
+
+[ss T] = size(CPDpot);
+fwd = cell(ss,T);
+hnodes = engine.hnodes(:)';
+onodes = engine.onodes(:)';
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+onodes2 = [onodes onodes+ss];
+ns(onodes2) = 1;
+
+logscale = zeros(1,T);
+H = length(hnodes);
+local_logscale = zeros(1,ss);
+
+t = 1;
+for i=hnodes
+ fwd{i,t} = CPDpot{i,t};
+ c = engine.obschild(i);
+ if c > 0
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, CPDpot{c, t});
+ end
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+end
+logscale(t) = sum(local_logscale);
+
+for t=2:T
+ for i=hnodes
+ ps = parents(bnet.dag, i+ss);
+ assert(all(ps<=ss)); % in previous slice
+ prior = CPDpot{i,t};
+ for p=ps(:)'
+ prior = multiply_by_pot(prior, fwd{p,t-1});
+ end
+ fwd{i,t} = marginalize_pot(prior, i+ss);
+ fwd{i,t} = set_domain_pot(fwd{i,t}, i);
+ c = engine.obschild(i);
+ if c > 0
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, CPDpot{c,t});
+ end
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+ end
+ logscale(t) = sum(local_logscale);
+end
+
+loglik = sum(logscale);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function marginal = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on the specified family (ff)
+% marginal = marginal_family(engine, i, t)
+
+
+if engine.filter
+ error('can''t currently use marginal_family when filtering with ff');
+end
+
+if nargin < 3, t = 1; end
+
+% The method is similar to the following HMM equation:
+% xi(i,j,t) = normalise( alpha(i,t) * transmat(i,j) * obsmat(j,t+1) * beta(j,t+1) )
+% where xi(i,j,t) = Pr(Q(t)=i, Q(t+1)=j | y(1:T))
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+
+if myismember(i, engine.onodes)
+ ps = parents(bnet.dag, i);
+ p = ps(1);
+ marginal = pot_to_marginal(engine.marginals{ps(1),t});
+ fam = ([ps i]) + (t-1)*ss;
+elseif t==1
+ marginal = pot_to_marginal(engine.marginals{i,t});
+ fam = i + (t-1)*ss;
+else
+ pot = engine.CPDpot{i,t};
+ c = engine.obschild(i);
+ if c>0
+ pot = multiply_by_pot(pot, engine.CPDpot{c,t});
+ end
+ pot = multiply_by_pot(pot, engine.back{i,t});
+ ps = parents(bnet.dag, i+ss);
+ for p=ps(:)'
+ pot = multiply_by_pot(pot, engine.fwd{p,t-1});
+ end
+ marginal = pot_to_marginal(normalize_pot(pot));
+ fam = ([ps i+ss]) + (t-2)*ss;
+end
+
+% we convert the domain to the unrolled numbering system
+% so that update_ess extracts the right evidence.
+marginal.domain = fam;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function marginal = marginal_nodes(engine, nodes, t)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (ff)
+% marginal = marginal_nodes(engine, i, t)
+% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
+% If enter_evidence used filtering instead of smoothing, this will return Pr(X(i,t) | Y(1:t)).
+
+if nargin < 3, t = 1; end
+assert(length(nodes)==1);
+i = nodes(end);
+if myismember(i, engine.hnodes)
+ marginal = pot_to_marginal(engine.marginals{i,t});
+else
+ marginal = pot_to_marginal(dpot(i, 1, 1)); % observed
+end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+% we convert the domain to the unrolled numbering system
+% so that update_ess extracts the right evidence.
+marginal.domain = nodes+(t-1)*ss;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/smooth_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@ff_inf_engine/smooth_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,89 @@
+function [marginals, fwd, back, loglik] = smooth_evidence(engine, CPDpot, observed, pot_type)
+% [marginals, fwd, back, loglik] = smooth_evidence(engine, CPDpot, observed, pot_type) (ff)
+
+error('ff smoothing is broken');
+
+[ss T] = size(CPDpot);
+fwd = cell(ss,T);
+hnodes = engine.hnodes(:)';
+onodes = engine.onodes(:)';
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+onodes2 = [onodes onodes+ss];
+ns(onodes2) = 1;
+
+logscale = zeros(1,T);
+H = length(hnodes);
+local_logscale = zeros(1,ss);
+
+t = 1;
+for i=hnodes
+ fwd{i,t} = CPDpot{i,t};
+ c = engine.obschild(i);
+ if 0 % c > 0
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, CPDpot{c, t});
+ end
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+end
+logscale(t) = sum(local_logscale);
+
+for t=2:T
+ for i=hnodes
+ ps = parents(bnet.dag, i+ss);
+ assert(all(ps<=ss)); % in previous slice
+ prior = CPDpot{i,t};
+ for p=ps(:)'
+ prior = multiply_by_pot(prior, fwd{p,t-1});
+ end
+ fwd{i,t} = marginalize_pot(prior, i+ss);
+ fwd{i,t} = set_domain_pot(fwd{i,t}, i);
+ c = engine.obschild(i);
+ if 0 % c > 0
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, CPDpot{c,t});
+ end
+ [fwd{i,t}, local_logscale(i)] = normalize_pot(fwd{i,t});
+ end
+ logscale(t) = sum(local_logscale);
+end
+
+loglik = sum(logscale);
+
+back = cell(ss,T);
+t = T;
+for i=hnodes
+ pot = dpot(i, ns(i));
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ pot = multiply_pots(pot, CPDpot{c,t});
+ end
+ back{i,t} = marginalize_pot(pot, i);
+ back{i,t} = normalize_pot(back{i,t});
+ back{i,t} = set_domain_pot(back{i,t}, i+ss);
+end
+for t=T-1:-1:1
+ for i=hnodes
+ pot = dpot(i, ns(i));
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ pot = multiply_pots(pot, back{c,t+1});
+ pot = multiply_pots(pot, CPDpot{c,t+1});
+ end
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ pot = multiply_pots(pot, CPDpot{c,t});
+ end
+ back{i,t} = marginalize_pot(pot, i);
+ back{i,t} = normalize_pot(back{i,t});
+ back{i,t} = set_domain_pot(back{i,t}, i+ss);
+ end
+end
+
+
+% COMBINE
+for t=1:T
+ for i=hnodes
+ back{i,t} = set_domain_pot(back{i,t}, i);
+ fwd{i,t} = multiply_by_pot(fwd{i,t}, back{i,t});
+ marginals{i,t} = normalize_pot(fwd{i,t});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/frontier_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/set_fwdback.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@frontier_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (frontier)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (not yet supported), else sum-product [0]
+% filter - if 1, do filtering, else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+assert(~maximize);
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+onodes = find(~isemptycell(evidence));
+cnodes = unroll_set(bnet.cnodes(:), ss, T);
+pot_type = determine_pot_type(bnet, onodes);
+
+CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type);
+
+[engine.fwdback, loglik, engine.fwd_frontier, engine.back_frontier] = ...
+ enter_soft_evidence(engine, CPDpot, onodes, pot_type, filter);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,142 @@
+function [fwdback, loglik, fwd_frontier, back_frontier] = enter_soft_evidence(engine, CPD, onodes, pot_type, filter)
+% ENTER_SOFT_EVIDENCE Add soft evidence to network (frontier)
+% [fwdback, loglik] = enter_soft_evidence(engine, CPDpot, onodes, filter)
+
+if nargin < 3, filter = 0; end
+
+[ss T] = size(CPD);
+bnet = bnet_from_engine(engine);
+ns = repmat(bnet.node_sizes_slice(:), 1, T);
+cnodes = unroll_set(bnet.cnodes(:), ss, T);
+
+% FORWARDS
+fwd = cell(ss,T);
+ll = zeros(1,T);
+S = 2*ss; % num. intermediate frontiers to get from t to t+1
+frontier = cell(S,T);
+
+% Start with empty frontier, and add each node in slice 1
+init = mk_initial_pot(pot_type, [], ns, cnodes, onodes);
+t = 1;
+s = 1;
+j = 1;
+frontier{s,t} = update(init, j, 1, CPD{j}, engine.fdom1{s}, pot_type, ns, cnodes, onodes);
+fwd{j} = frontier{s,t};
+for s=2:ss
+ j = s; % add node j at step s
+ frontier{s,t} = update(frontier{s-1,t}, j, 1, CPD{j}, engine.fdom1{s}, pot_type, ns, cnodes, onodes);
+ fwd{j} = frontier{s,t};
+end
+frontier{S,t} = frontier{ss,t};
+[frontier{S,t}, ll(1)] = normalize_pot(frontier{S,t});
+
+% Now move frontier from slice to slice
+OPS = engine.ops;
+add = OPS>0;
+nodes = [zeros(S,1) unroll_set(abs(OPS(:)), ss, T-1)];
+for t=2:T
+ offset = (t-2)*ss;
+ for s=1:S
+ if s==1
+ prev_ndx = (t-2)*S + S; % S,t-1
+ else
+ prev_ndx = (t-1)*S + s-1; % s-1,t
+ end
+ j = nodes(s,t);
+ frontier{s,t} = update(frontier{prev_ndx}, j, add(s), CPD{j}, engine.fdom{s}+offset, pot_type, ns, cnodes, onodes);
+ if add(s)
+ fwd{j} = frontier{s,t};
+ end
+ end
+ [frontier{S,t}, ll(t)] = normalize_pot(frontier{S,t});
+end
+loglik = sum(ll);
+
+
+fwd_frontier = frontier;
+
+if filter
+ fwdback = fwd;
+ return;
+end
+
+
+% BACKWARDS
+back = cell(ss,T);
+add = ~add; % forwards add = backwards remove
+frontier = cell(S,T+1);
+t = T;
+dom = (1:ss) + (t-1)*ss;
+frontier{1,T+1} = mk_initial_pot(pot_type, dom, ns, cnodes, onodes); % all 1s for last slice
+for t=T:-1:2
+ offset = (t-2)*ss;
+ for s=S:-1:1 % reverse order
+ if s==S
+ prev_ndx = t*S + 1; % 1,t+1
+ else
+ prev_ndx = (t-1)*S + (s+1); % s+1,t
+ end
+ j = nodes(s,t);
+ if ~add(s)
+ back{j} = frontier{prev_ndx}; % save frontier before removing
+ end
+ frontier{s,t} = rev_update(frontier{prev_ndx}, t, s, j, add(s), CPD{j}, engine.fdom{s}+offset, pot_type, ns, cnodes, onodes);
+ end
+ frontier{1,t} = normalize_pot(frontier{1,t});
+end
+% Remove each node in first slice until left with empty set
+t = 1;
+frontier{ss+1,t} = frontier{1,2};
+add = 0;
+for s=ss:-1:1
+ j = s; % remove node j at step s
+ back{j} = frontier{s+1,t};
+ frontier{s,t} = rev_update(frontier{s+1,t}, t, s, j, add, CPD{j}, 1:s, pot_type, ns, cnodes, onodes);
+end
+
+% COMBINE
+for t=1:T
+ for i=1:ss
+ %fwd{i,t} = multiply_by_pot(fwd{i,t}, back{i,t});
+ %fwdback{i,t} = normalize_pot(fwd{i,t});
+ fwdback{i,t} = normalize_pot(multiply_pots(fwd{i,t}, back{i,t}));
+ end
+end
+
+back_frontier = frontier;
+
+%%%%%%%%%%
+function new_frontier = update(old_frontier, j, add, CPD, newdom, pot_type, ns, cnodes, onodes)
+
+if add
+ new_frontier = mk_initial_pot(pot_type, newdom, ns, cnodes, onodes);
+ new_frontier = multiply_by_pot(new_frontier, old_frontier);
+ new_frontier = multiply_by_pot(new_frontier, CPD);
+else
+ new_frontier = marginalize_pot(old_frontier, mysetdiff(domain_pot(old_frontier), j));
+end
+
+
+%%%%%%
+function new_frontier = rev_update(old_frontier, t, s, j, add, CPD, junk, pot_type, ns, cnodes, onodes)
+
+olddom = domain_pot(old_frontier);
+assert(isequal(junk, olddom));
+
+if add
+ % add: extend domain to include j by multiplying by 1
+ newdom = myunion(olddom, j);
+ new_frontier = mk_initial_pot(pot_type, newdom, ns, cnodes, onodes);
+ new_frontier = multiply_by_pot(new_frontier, old_frontier);
+ %fprintf('t=%d, s=%d, add %d to %s to make %s\n', t, s, j, num2str(olddom), num2str(newdom));
+else
+ % remove: multiply in CPT and then marginalize out j
+ % parents of j are guaranteed to be in old_frontier, else couldn't have added j on fwds pass
+ old_frontier = multiply_by_pot(old_frontier, CPD);
+ newdom = mysetdiff(olddom, j);
+ new_frontier = marginalize_pot(old_frontier, newdom);
+ %newdom2 = domain_pot(new_frontier);
+ %fprintf('t=%d, s=%d, rem %d from %s to make %s\n', t, s, j, num2str(olddom), num2str(newdom2));
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/frontier_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/frontier_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,121 @@
+function engine = frontier_inf_engine(bnet)
+% FRONTIER_INF_ENGINE Inference engine for DBNs which which uses the frontier algorithm.
+% engine = frontier_inf_engine(bnet)
+%
+% The frontier algorithm extends the forwards-backwards algorithm to DBNs in the obvious way,
+% maintaining a joint distribution (frontier) over all the nodes in a time slice.
+% When all the hidden nodes in the DBN are persistent (have children in the next time slice),
+% its theoretical running time is often similar to that of the junction tree algorithm,
+% although in practice, this algorithm seems to very slow (at least in matlab).
+% However, it is extremely simple to describe and implement.
+%
+% Suppose there are n binary nodes per slice, so the frontier takes O(2^n) space.
+% Each time step takes between O(n 2^{n+1}) and O(n 2^{2n}) operations, depending on the graph structure.
+% The lower bound is achieved by a set of n independent chains, as in a factorial HMM.
+% The upper bound is achieved by a set of n fully interconnected chains, as in an HMM.
+%
+% The factor of n arises because we need to multiply in each CPD from slice t+1.
+% The second factor depends on the size of the frontier to which we add the new node.
+% In an FHMM, once we have added X(i,t+1), we can marginalize out X(i,t) from the frontier, since
+% no other nodes depend on it; hence the frontier never contains more than n+1 nodes.
+% In a fully coupled HMM, we must leave X(i,t) in the frontier until all X(j,t+1) have been
+% added; hence the frontier will contain 2*n nodes at its peak.
+%
+% For details, see
+% "The Factored Frontier Algorithm for Approximate Inference in DBNs",
+% Kevin Murphy and Yair Weiss, UAI 01.
+
+ns = bnet.node_sizes_slice;
+onodes = bnet.observed;
+ns(onodes) = 1;
+ss = length(bnet.intra);
+
+[engine.ops, engine.fdom] = best_first_frontier_seq(ns, bnet.dag);
+engine.ops1 = 1:ss;
+
+engine.fwdback = [];
+engine.fwd_frontier = [];
+engine.back_frontier = [];
+
+engine.fdom1 = cell(1,ss);
+for s=1:ss
+ engine.fdom1{s} = 1:s;
+end
+
+engine = class(engine, 'frontier_inf_engine', inf_engine(bnet));
+
+
+%%%%%%%%%
+
+function [ops, frontier_set] = best_first_frontier_seq(ns, dag)
+% BEST_FIRST_FRONTIER_SEQ Do a greedy search for the sequence of additions/removals to the frontier.
+% [ops, frontier_set] = best_first_frontier_seq(ns, dag)
+%
+% We maintain 3 sets: the frontier (F), the right set (R), and the left set (L).
+% The invariant is that the nodes in R are d-separated from L given F.
+% We start with slice 1 in F and slice 2 in R.
+% The goal is to move slice 1 from F to L, and slice 2 from R to F, so as to minimize the size
+% of the frontier at each step, where the size(F) = product of the node-sizes of nodes in F.
+% A node may be removed (from F to L) if it has no children in R.
+% A node may be added (from R to F) if its parents are in F.
+%
+% ns(i) = num. discrete values node i can take on (i=1..ss, where ss = slice size)
+% dag is the (2*ss) x (2*ss) adjacency matrix for the 2-slice DBN.
+
+% Example:
+%
+% 4 9
+% ^ ^
+% | |
+% 2 -> 7
+% ^ ^
+% | |
+% 1 -> 6
+% | |
+% v v
+% 3 -> 8
+% | |
+% v V
+% 5 10
+%
+% ops = -4, -5, 6, -1, 7, -2, 8, -3, 9, 10
+
+ss = length(ns);
+ns = [ns(:)' ns(:)'];
+ops = zeros(1,ss);
+L = []; F = 1:ss; R = (1:ss)+ss;
+frontier_set = cell(1,2*ss);
+for s=1:2*ss
+ remcost = inf*ones(1,2*ss);
+ %disp(['L: ' num2str(L) ', F: ' num2str(F) ', R: ' num2str(R)]);
+ maybe_removable = myintersect(F, 1:ss);
+ for n=maybe_removable(:)'
+ cs = children(dag, n);
+ if isempty(myintersect(cs, R))
+ remcost(n) = prod(ns(mysetdiff(F, n)));
+ end
+ end
+ %remcost
+ if any(remcost < inf)
+ n = argmin(remcost);
+ ops(s) = -n;
+ L = myunion(L, n);
+ F = mysetdiff(F, n);
+ else
+ addcost = inf*ones(1,2*ss);
+ for n=R(:)'
+ ps = parents(dag, n);
+ if mysubset(ps, F)
+ addcost(n) = prod(ns(myunion(F, [ps n])));
+ end
+ end
+ %addcost
+ assert(any(addcost < inf));
+ n = argmin(addcost);
+ ops(s) = n;
+ R = mysetdiff(R, n);
+ F = myunion(F, n);
+ end
+ %fprintf('op at step %d = %d\n\n', s, ops(s));
+ frontier_set{s} = F;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function marginal = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on node i in slice t and its parents (frontier)
+% marginal = marginal_family(engine, i, t)
+
+bnet = bnet_from_engine(engine);
+fam = family(bnet.dag, i, t);
+marginal = pot_to_marginal(normalize_pot(marginalize_pot(engine.fwdback{i,t}, fam)));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function marginal = marginal_nodes(engine, nodes, t)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (frontier)
+% marginal = marginal_nodes(engine, nodes, t)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' cannot span more than 2 time slices.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+assert(length(nodes)==1);
+i = nodes(1);
+bigpot = engine.fwdback{i,t};
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+nodes = nodes + (t-1)*ss;
+%if t > 1, nodes = nodes + ss; end
+marginal = pot_to_marginal(marginalize_pot(bigpot, nodes));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/set_fwdback.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@frontier_inf_engine/set_fwdback.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function engine = set_fwdback(engine, fb)
+% SET_FWDBACK Set the field 'fwdback', which contains the frontiers after propagation
+% engine = set_fwdback(engine, fb)
+%
+% This is used by frontier_fast_inf_engine/enter_evidence
+% as a workaround for Matlab's annoying privacy control
+
+engine.fwdback = fb;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+/enter_evidence.m/1.2/Sat Sep 17 17:00:30 2005//
+/find_mpe.m/1.1.1.1/Thu Jun 20 00:18:24 2002//
+/fwdback_twoslice.m/1.1/Sat Nov 26 01:24:09 2005//
+/hmm_inf_engine.m/1.1.1.1/Thu Nov 14 20:05:36 2002//
+/marginal_family.m/1.1.1.1/Thu Nov 14 20:05:36 2002//
+/marginal_nodes.m/1.1.1.1/Thu Nov 14 20:03:28 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/Old////
+D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@hmm_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/dhmm_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@hmm_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/dhmm_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/dhmm_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function engine = dhmm_inf_engine(bnet, onodes)
+% DHMM_INF_ENGINE Inference engine for discrete DBNs which uses the forwards-backwards algorithm.
+% engine = dhmm_inf_engine(bnet, onodes)
+%
+% 'onodes' specifies which nodes are observed; these must be leaves, and can be discrete or continuous.
+% The remaining nodes are all hidden, and must be discrete.
+% The DBN is converted to an HMM, with a single meganode, but which may have factored obs.
+
+ss = length(bnet.intra);
+hnodes = mysetdiff(1:ss, onodes);
+evidence = cell(ss, 2);
+ns = bnet.node_sizes;
+Q = prod(ns(hnodes));
+tmp = dpot_to_table(compute_joint_pot(bnet, hnodes, evidence));
+engine.startprob = reshape(tmp, Q, 1);
+tmp = dpot_to_table(compute_joint_pot(bnet, [hnodes hnodes+ss], evidence));
+engine.transprob = mk_stochastic(reshape(tmp, Q, Q));
+engine.obsprob = cell(1, length(onodes));
+for i=1:length(onodes)
+ tmp = dpot_to_table(compute_joint_pot(bnet, [hnodes onodes(i)], evidence));
+ O = ns(onodes(i));
+ engine.obsprob{i} = mk_stochastic(reshape(tmp, Q, O));
+end
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.gamma = [];
+engine.xi = [];
+
+engine.onodes = onodes;
+engine.hnodes = hnodes;
+engine.maximize = [];
+
+engine = class(engine, 'dhmm_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (hmm)
+% marginal = marginal_nodes(engine, i, t, add_ev)
+%
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+if t==1
+ fam = family(bnet.dag, i);
+ bigpot = engine.one_slice_marginal{t};
+ nodes = fam;
+else
+ fam = family(bnet.dag, i+ss);
+ if any(fam <= ss) % family spans 2 slices
+ bigpot = engine.two_slice_marginal{t-1}; % t-1 and t
+ nodes = fam + (t-2)*ss;
+ else
+ bigpot = engine.one_slice_marginal{t};
+ nodes = fam-ss + (t-1)*ss;
+ end
+end
+
+marginal = pot_to_marginal(marginalize_pot(bigpot, nodes, engine.maximize));
+
+if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/Old/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (hmm)
+% marginal = marginal_nodes(engine, nodes, t, add_ev)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' cannot span more than 2 time slices.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+if all(nodes <= ss)
+ bigpot = engine.one_slice_marginal{t};
+else
+ bigpot = engine.two_slice_marginal{t};
+end
+
+nodes = nodes + (t-1)*ss;
+marginal = pot_to_marginal(marginalize_pot(bigpot, nodes, engine.maximize));
+
+if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,64 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (hmm)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (not yet supported), else sum-product [0]
+% filter - if 1, does filtering, else smoothing [0]
+% oneslice - 1 means only compute marginals on nodes within a single slice [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+oneslice = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ case 'oneslice', oneslice = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+[ss T] = size(evidence);
+engine.maximize = maximize;
+engine.evidence = evidence;
+bnet = bnet_from_engine(engine);
+engine.node_sizes = repmat(bnet.node_sizes_slice(:), [1 T]);
+
+obs_bitv = ~isemptycell(evidence(:));
+bitv = reshape(obs_bitv, ss, T);
+for t=1:T
+ onodes = find(bitv(:,t));
+ if ~isequal(onodes, bnet.observed(:))
+ error(['dbn was created assuming observed nodes per slice were '...
+ num2str(bnet.observed(:)') ' but the evidence in slice ' num2str(t) ...
+ ' has observed nodes ' num2str(onodes(:)')]);
+ end
+end
+
+obslik = mk_hmm_obs_lik_matrix(engine, evidence);
+
+%[alpha, beta, gamma, loglik, xi] = fwdback(engine.startprob, engine.transprob, obslik, ...
+[alpha, beta, gamma, loglik, xi] = fwdback_twoslice(engine, engine.startprob,...
+ engine.transprob, obslik, ...
+ 'maximize', maximize, 'fwd_only', filter, ...
+ 'compute_xi', ~oneslice);
+
+engine.one_slice_marginal = gamma; % gamma(:,t) for t=1:T
+if ~oneslice
+ Q = size(gamma,1);
+ engine.two_slice_marginal = reshape(xi, [Q*Q T-1]); % xi(:,t) for t=1:T-1
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function mpe = find_mpe(engine, evidence)
+% FIND_MPE Find the most probable explanation (Viterbi)
+% mpe = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+
+obslik = mk_hmm_obs_lik_matrix(engine, evidence);
+path = viterbi_path(engine.startprob, engine.transprob, obslik);
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes_slice;
+ns(bnet.observed) = 1;
+ass = ind2subv(ns, path);
+mpe = num2cell(ass');
+mpe(bnet.observed,:) = evidence(bnet.observed,:);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,198 @@
+function [alpha, beta, gamma, loglik, xi, gamma2] = fwdback_twoslice(engine, init_state_distrib, transmat, obslik, varargin)
+% FWDBACK Compute the posterior probs. in an HMM using the forwards backwards algo.
+%
+% [alpha, beta, gamma, loglik, xi, gamma2] = fwdback(init_state_distrib, transmat, obslik, ...)
+%
+% Notation:
+% Y(t) = observation, Q(t) = hidden state, M(t) = mixture variable (for MOG outputs)
+% A(t) = discrete input (action) (for POMDP models)
+%
+% INPUT:
+% init_state_distrib(i) = Pr(Q(1) = i)
+% transmat(i,j) = Pr(Q(t) = j | Q(t-1)=i)
+% or transmat{a}(i,j) = Pr(Q(t) = j | Q(t-1)=i, A(t-1)=a) if there are discrete inputs
+% obslik(i,t) = Pr(Y(t)| Q(t)=i)
+% (Compute obslik using eval_pdf_xxx on your data sequence first.)
+%
+% Optional parameters may be passed as 'param_name', param_value pairs.
+% Parameter names are shown below; default values in [] - if none, argument is mandatory.
+%
+% For HMMs with MOG outputs: if you want to compute gamma2, you must specify
+% 'obslik2' - obslik(i,j,t) = Pr(Y(t)| Q(t)=i,M(t)=j) []
+% 'mixmat' - mixmat(i,j) = Pr(M(t) = j | Q(t)=i) []
+%
+% For HMMs with discrete inputs:
+% 'act' - act(t) = action performed at step t
+%
+% Optional arguments:
+% 'fwd_only' - if 1, only do a forwards pass and set beta=[], gamma2=[] [0]
+% 'scaled' - if 1, normalize alphas and betas to prevent underflow [1]
+% 'maximize' - if 1, use max-product instead of sum-product [0]
+%
+% OUTPUTS:
+% alpha(i,t) = p(Q(t)=i | y(1:t)) (or p(Q(t)=i, y(1:t)) if scaled=0)
+% beta(i,t) = p(y(t+1:T) | Q(t)=i)*p(y(t+1:T)|y(1:t)) (or p(y(t+1:T) | Q(t)=i) if scaled=0)
+% gamma(i,t) = p(Q(t)=i | y(1:T))
+% loglik = log p(y(1:T))
+% xi(i,j,t-1) = p(Q(t-1)=i, Q(t)=j | y(1:T))
+% gamma2(j,k,t) = p(Q(t)=j, M(t)=k | y(1:T)) (only for MOG outputs)
+%
+% If fwd_only = 1, these become
+% alpha(i,t) = p(Q(t)=i | y(1:t))
+% beta = []
+% gamma(i,t) = p(Q(t)=i | y(1:t))
+% xi(i,j,t-1) = p(Q(t-1)=i, Q(t)=j | y(1:t))
+% gamma2 = []
+%
+% Note: we only compute xi if it is requested as a return argument, since it can be very large.
+% Similarly, we only compute gamma2 on request (and if using MOG outputs).
+%
+% Examples:
+%
+% [alpha, beta, gamma, loglik] = fwdback(pi, A, multinomial_prob(sequence, B));
+%
+% [B, B2] = mixgauss_prob(data, mu, Sigma, mixmat);
+% [alpha, beta, gamma, loglik, xi, gamma2] = fwdback(pi, A, B, 'obslik2', B2, 'mixmat', mixmat);
+
+
+if nargout >= 5, compute_xi = 1; else compute_xi = 0; end
+if nargout >= 6, compute_gamma2 = 1; else compute_gamma2 = 0; end
+
+[obslik2, mixmat, fwd_only, scaled, act, maximize, compute_xi, compute_gamma2] = process_options(varargin, 'obslik2', [], 'mixmat', [], 'fwd_only', 0, 'scaled', 1, 'act', [], 'maximize', 0, 'compute_xi', compute_xi, 'compute_gamma2', compute_gamma2);
+
+
+[Q T] = size(obslik);
+
+if isempty(obslik2)
+ compute_gamma2 = 0;
+end
+
+if isempty(act)
+ act = ones(1,T);
+ transmat = { transmat } ;
+end
+
+scale = ones(1,T);
+
+% scale(t) = Pr(O(t) | O(1:t-1)) = 1/c(t) as defined by Rabiner (1989).
+% Hence prod_t scale(t) = Pr(O(1)) Pr(O(2)|O(1)) Pr(O(3) | O(1:2)) = Pr(O(1), ... ,O(T))
+% or log P = sum_t log scale(t).
+% Rabiner suggests multiplying beta(t) by scale(t), but we can instead
+% normalise beta(t) - the constants will cancel when we compute gamma.
+
+loglik = 0;
+
+alpha = zeros(Q,T);
+gamma = zeros(Q,T);
+if compute_xi
+ xi = zeros(Q,Q,T-1);
+else
+ xi = [];
+end
+
+
+%%%%%%%%% Forwards %%%%%%%%%%
+
+t = 1;
+alpha(:,1) = init_state_distrib(:) .* obslik(:,t);
+if scaled
+ %[alpha(:,t), scale(t)] = normaliseC(alpha(:,t));
+ [alpha(:,t), scale(t)] = normalise(alpha(:,t));
+end
+if scaled, assert(approxeq(sum(alpha(:,t)),1)), end
+for t=2:T
+ %trans = transmat(:,:,act(t-1))';
+ trans = transmat{act(t-1)};
+ if maximize
+ m = max_mult(trans', alpha(:,t-1));
+ %A = repmat(alpha(:,t-1), [1 Q]);
+ %m = max(trans .* A, [], 1);
+ else
+ m = trans' * alpha(:,t-1);
+ end
+ alpha(:,t) = m(:) .* obslik(:,t);
+ if scaled
+ %[alpha(:,t), scale(t)] = normaliseC(alpha(:,t));
+ [alpha(:,t), scale(t)] = normalise(alpha(:,t));
+ end
+ if compute_xi & fwd_only % useful for online EM
+ %xi(:,:,t-1) = normaliseC((alpha(:,t-1) * obslik(:,t)') .* trans);
+ xi(:,:,t-1) = normalise((alpha(:,t-1) * obslik(:,t)') .* trans);
+ end
+ if scaled, assert(approxeq(sum(alpha(:,t)),1)), end
+end
+if scaled
+ if any(scale==0)
+ loglik = -inf;
+ else
+ loglik = sum(log(scale));
+ end
+else
+ loglik = log(sum(alpha(:,T)));
+end
+
+if fwd_only
+ gamma = alpha;
+ beta = [];
+ gamma2 = [];
+ return;
+end
+
+
+%%%%%%%%% Backwards %%%%%%%%%%
+
+beta = zeros(Q,T);
+if compute_gamma2
+ M = size(mixmat, 2);
+ gamma2 = zeros(Q,M,T);
+else
+ gamma2 = [];
+end
+
+beta(:,T) = ones(Q,1);
+%gamma(:,T) = normaliseC(alpha(:,T) .* beta(:,T));
+gamma(:,T) = normalise(alpha(:,T) .* beta(:,T));
+t=T;
+if compute_gamma2
+ denom = obslik(:,t) + (obslik(:,t)==0); % replace 0s with 1s before dividing
+ gamma2(:,:,t) = obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M]) ./ repmat(denom, [1 M]);
+ %gamma2(:,:,t) = normaliseC(obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M])); % wrong!
+end
+for t=T-1:-1:1
+ b = beta(:,t+1) .* obslik(:,t+1);
+ %trans = transmat(:,:,act(t));
+ trans = transmat{act(t)};
+ if maximize
+ B = repmat(b(:)', Q, 1);
+ beta(:,t) = max(trans .* B, [], 2);
+ else
+ beta(:,t) = trans * b;
+ end
+ if scaled
+ %beta(:,t) = normaliseC(beta(:,t));
+ beta(:,t) = normalise(beta(:,t));
+ end
+ %gamma(:,t) = normaliseC(alpha(:,t) .* beta(:,t));
+ gamma(:,t) = normalise(alpha(:,t) .* beta(:,t));
+ if compute_xi
+ %xi(:,:,t) = normaliseC((trans .* (alpha(:,t) * b')));
+ xi(:,:,t) = normalise((trans .* (alpha(:,t) * b')));
+ %xi(:,:,t) = (trans .* (alpha(:,t) * b'));
+ end
+ if compute_gamma2
+ denom = obslik(:,t) + (obslik(:,t)==0); % replace 0s with 1s before dividing
+ gamma2(:,:,t) = obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M]) ./ repmat(denom, [1 M]);
+ %gamma2(:,:,t) = normaliseC(obslik2(:,:,t) .* mixmat .* repmat(gamma(:,t), [1 M]));
+ end
+end
+
+
+% We now explain the equation for gamma2
+% Let zt=y(1:t-1,t+1:T) be all observations except y(t)
+% gamma2(Q,M,t) = P(Qt,Mt|yt,zt) = P(yt|Qt,Mt,zt) P(Qt,Mt|zt) / P(yt|zt)
+% = P(yt|Qt,Mt) P(Mt|Qt) P(Qt|zt) / P(yt|zt)
+% Now gamma(Q,t) = P(Qt|yt,zt) = P(yt|Qt) P(Qt|zt) / P(yt|zt)
+% hence
+% P(Qt,Mt|yt,zt) = P(yt|Qt,Mt) P(Mt|Qt) [P(Qt|yt,zt) P(yt|zt) / P(yt|Qt)] / P(yt|zt)
+% = P(yt|Qt,Mt) P(Mt|Qt) P(Qt|yt,zt) / P(yt|Qt)
+%
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/hmm_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/hmm_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+function engine = hmm_inf_engine(bnet, varargin)
+% HMM_INF_ENGINE Inference engine for DBNs which uses the forwards-backwards algorithm.
+% engine = hmm_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - 1 means max-product, 0 means sum-product [0]
+%
+% The DBN is converted to an HMM with a single meganode, but the observed nodes remain factored.
+% This can be faster than jtree if the num. hidden nodes is low, because of lower constant factors.
+%
+% All hidden nodes must be discrete.
+% All observed nodes are assumed to be leaves, i.e., they cannot be parents of anything.
+% The parents of each observed leaf are assumed to be a subset of the hidden nodes within the same slice.
+% The only exception is if bnet is an AR-HMM, where the parents are assumed to be self in the
+% previous slice (continuous), plus all the discrete nodes in the current slice.
+
+ss = bnet.nnodes_per_slice;
+
+engine.maximize = 0;
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+% Stuff to do with speeding up marginal_family
+[int, engine.persist, engine.transient] = compute_interface_nodes(bnet.intra, bnet.inter);
+engine.persist_bitv = zeros(1, ss);
+engine.persist_bitv(engine.persist) = 1;
+
+
+ns = bnet.node_sizes(:);
+ns(bnet.observed) = 1;
+ns(bnet.observed+ss) = 1;
+engine.eff_node_sizes = ns;
+
+for o=bnet.observed(:)'
+ %if bnet.equiv_class(o,1) ~= bnet.equiv_class(o,2)
+ % error(['observed node ' num2str(o) ' is not tied'])
+ %end
+ cs = children(bnet.dag, o);
+ if ~isempty(cs)
+ error(['observed node ' num2str(o) ' is not allowed children'])
+ end
+end
+
+[engine.startprob, engine.transprob, engine.obsprob] = dbn_to_hmm(bnet);
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.one_slice_marginal = [];
+engine.two_slice_marginal = [];
+
+ss = length(bnet.intra);
+engine.evidence = [];
+engine.node_sizes = [];
+
+% avoid the need to do bnet_from_engine, which is slow
+engine.slice_size = ss;
+engine.parents = bnet.parents;
+
+engine = class(engine, 'hmm_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (hmm)
+% marginal = marginal_family(engine, i, t, add_ev)
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+ns = engine.eff_node_sizes(:);
+ss = engine.slice_size;
+
+if t==1 | ~engine.persist_bitv(i)
+ bigT = engine.one_slice_marginal(:,t);
+ ps = engine.parents{i};
+ dom = [ps i] + (t-1)*ss;
+ bigdom = 1:ss;
+ bigsz = ns(bigdom);
+ bigdom = bigdom + (t-1)*ss;
+else % some parents are in previous slice
+ bigT = engine.two_slice_marginal(:,t-1); % t-1 and t
+ ps = engine.parents{i+ss};
+ dom = [ps i+ss] + (t-2)*ss;
+ bigdom = 1:(2*ss); % domain of xi(:,:,t)
+ bigsz = ns(bigdom);
+ bigdom = bigdom + (t-2)*ss;
+end
+marginal.domain = dom;
+
+marginal.T = marg_table(bigT, bigdom, bigsz, dom, engine.maximize);
+marginal.mu = [];
+marginal.Sigma = [];
+
+if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (hmm)
+% marginal = marginal_nodes(engine, nodes, t, add_ev)
+%
+% 'nodes' must be a single node.
+% t is the time slice.
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+assert(length(nodes)==1)
+ss = engine.slice_size;
+
+i = nodes(1);
+bigT = engine.one_slice_marginal(:,t);
+dom = i + (t-1)*ss;
+
+ns = engine.eff_node_sizes(:);
+bigdom = 1:ss;
+marginal.T = marg_table(bigT, bigdom + (t-1)*ss, ns(bigdom), dom, engine.maximize);
+
+marginal.domain = dom;
+marginal.mu = [];
+marginal.Sigma = [];
+
+if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/mk_hmm_obs_lik_matrix.m/1.1.1.1/Sun May 4 21:42:26 2003//
+/mk_hmm_obs_lik_vec.m/1.1.1.1/Thu Jan 23 18:50:10 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@hmm_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_matrix.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_matrix.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function obslik = mk_hmm_obs_lik_matrix(engine, evidence)
+
+T = size(evidence,2);
+Q = length(engine.startprob);
+obslik = ones(Q, T);
+bnet = bnet_from_engine(engine);
+% P(o1,o2| Q1,Q2) = P(o1|Q1,Q2) * P(o2|Q1,Q2)
+onodes = bnet.observed;
+for i=1:length(onodes)
+ data = cell2num(evidence(onodes(i),:));
+ if bnet.auto_regressive(onodes(i))
+ params = engine.obsprob{i};
+ mu = params.big_mu;
+    Sigma = params.big_Sigma;
+ W = params.big_W;
+ mu0 = params.big_mu0;
+ Sigma0 = params.big_Sigma0;
+ %obslik_i = mk_arhmm_obs_lik(data, mu, Sigma, W, mu0, Sigma0
+ obslik_i = clg_prob(data(:,1:T-1), data(:,2:T), mu, Sigma, W);
+ obslik_i = [mixgauss_prob(data(:,1), mu0, Sigma0) obslik_i];
+ elseif myismember(onodes(i), bnet.dnodes)
+ %obslik_i = eval_pdf_cond_multinomial(data, engine.obsprob{i}.big_CPT);
+ obslik_i = multinomial_prob(data, engine.obsprob{i}.big_CPT);
+ else
+ %obslik_i = eval_pdf_cond_gauss(data, engine.obsprob{i}.big_mu, engine.obsprob{i}.big_Sigma);
+ obslik_i = mixgauss_prob(data, engine.obsprob{i}.big_mu, engine.obsprob{i}.big_Sigma);
+ end
+ obslik = obslik .* obslik_i;
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_vec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_vec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function obslik = mk_hmm_obs_lik_vec(engine, evidence)
+
+% P(o1,o2| h) = P(o1|h) * P(o2|h) where h = Q1,Q2,...
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+ns = bnet.node_sizes(:);
+ns(onodes) = 1;
+
+Q = length(engine.startprob);
+obslik = ones(Q, 1);
+
+for i=1:length(onodes)
+ o = onodes(i);
+ %data = cell2num(evidence(o,1));
+ data = evidence{o,1};
+ if myismember(o, bnet.dnodes)
+ obslik_i = eval_pdf_cond_multinomial(data, engine.obsprob{i}.CPT);
+ else
+ if bnet.auto_regressive(o)
+ error('can''t handle AR nodes')
+ end
+ %% calling mk_ghmm_obs_lik, which calls gaussian_prob, is slow, so we inline it
+ %% and use the pre-computed inverse matrix
+ %obslik_i = mk_ghmm_obs_lik(data, engine.obsprob{i}.mu, engine.obsprob{i}.Sigma);
+ x = data(:);
+ m = engine.obsprob{i}.mu;
+ Qi = size(m, 2);
+      obslik_i = zeros(Qi, 1);
+ invC = engine.obsprob{i}.inv_Sigma;
+ denom = engine.obsprob{i}.denom;
+ for j=1:Qi
+ numer = exp(-0.5 * (x-m(:,j))' * invC(:,:,j) * (x-m(:,j)));
+ obslik_i(j) = numer / denom(j);
+ end
+ end
+ % convert P(o|ps) into P(o|h) by multiplying onto a (h,o) potential of all 1s
+ ps = bnet.parents{o};
+ dom = [ps o];
+ obspot_i = dpot(dom, ns(dom), obslik_i);
+ dom = [hnodes o];
+ obspot = dpot(dom, ns(dom));
+ obspot = multiply_by_pot(obspot, obspot_i);
+ % compute p(oi|h) * p(oj|h)
+ S = struct(obspot);
+ obslik = obslik .* S.T(:);
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@hmm_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (hmm)
+% engine = update_engine(engine, newCPDs)
+
+%engine.inf_engine.bnet.CPD = newCPDs;
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+[engine.startprob, engine.transprob, engine.obsprob] = dbn_to_hmm(bnet_from_engine(engine));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_soft_evidence1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence2.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence3.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence4.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,119 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = zeros(1,T);
+bnet = bnet_from_engine(engine);
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in inferface to (t+1,t).
+% Then propagate from D to later slices.
+
+C = engine.out_clq;
+assert(C==engine.jtree_struct.root_clq);
+D = engine.in_clq;
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+for t=2:T
+ if t==2
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 slice2]);
+ pots = CPDpot(:,t-1:t);
+ else
+ %clqs = [D; engine.clq_ass_to_node(:,2)];
+ clqs = [D engine.jtree_struct.clq_ass_to_node(slice2)];
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ pots = [ {phiC}; CPDpot(:,t)]; % CPDpot domains are always slice 2
+ end
+ [clpot(:,t), seppot(:,:,t)] = init_pot(engine.jtree_struct.cliques, clqs, pots, pot_type, ...
+ find(observed(:,t-1:t)), bnet.node_sizes(:), bnet.cnodes);
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(C);
+
+ phiC = marginalize_pot(clpot{C,t}, engine.interface+ss, engine.maximize);
+end
+
+
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out inferface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+C = engine.in_clq;
+D = engine.out_clq;
+for t=T:-1:2
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(C);
+
+ if t >= 3
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+
+loglik = sum(logscale);
+
+
+%%%%%%%
+function [clpot, seppot] = init_pot(cliques, clqs, pots, pot_type, onodes, ns, cnodes);
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, ns, cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitely initialized to 1
+
+
+%%%%
+function [clpot, seppot] = collect_evidence(clpot, seppot, maximize, postorder, postorder_parents,...
+ separator)
+for n=postorder %postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n}, maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+
+%%%%
+function [clpot, seppot] = distribute_evidence(clpot, seppot, maximize, preorder, preorder_children,...
+ separator)
+for n=preorder
+ for c=preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,143 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = ones(1,T); % log(logscale(1)) = 0
+bnet = bnet_from_engine(engine);
+
+slice1 = 1:ss;
+slice2 = slice1+ss;
+
+% calibrate each 2-slice jtree in isolation
+for t=2:T
+ if t==2
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 slice2]);
+ pots = CPDpot(:,t-1:t);
+ else
+ clqs = engine.jtree_struct.clq_ass_to_node(slice2);
+ pots = CPDpot(:,t); % CPDpot domains are always slice 2
+ end
+  [clpot(:,t), seppot(:,:,t)] = init_pot(engine.jtree_struct.cliques, clqs, pots, pot_type, ...
+ find(observed(:,t-1:t)), bnet.node_sizes(:), bnet.cnodes);
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+end
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in inferface to (t+1,t).
+% Then propagate from D to later slices.
+
+C = engine.out_clq;
+D = engine.in_clq;
+for t=2:T-1
+ phiC = marginalize_pot(clpot{C,t}, engine.interface+ss, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ phiD = marginalize_pot(clpot{D,t+1}, engine.interface, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t+1} = multiply_by_pot(clpot{D,t+1}, ratio);
+
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(1);
+end
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out inferface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+C = engine.in_clq;
+D = engine.out_clq;
+for t=T:-1:2
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(1);
+
+ if t >= 3
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+
+loglik = sum(logscale);
+
+%%%%%%%%%%
+
+function [clpot, seppot] = calibrate(engine, clpot, seppot)
+
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+
+
+%%%%%%%
+function [clpot, seppot] = init_pot(cliques, clqs, pots, pot_type, onodes, ns, cnodes);
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, ns, cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitely initialized to 1
+
+
+%%%%
+function [clpot, seppot] = collect_evidence(clpot, seppot, maximize, postorder, postorder_parents,...
+ separator)
+for n=postorder %postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n}, maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+
+%%%%
+function [clpot, seppot] = distribute_evidence(clpot, seppot, maximize, preorder, preorder_children,...
+ separator)
+for n=preorder
+ for c=preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,125 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = zeros(1,T);
+bnet = bnet_from_engine(engine);
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in inferface to (t+1,t).
+% Then propagate from D to later slices.
+
+C = engine.out_clq;
+assert(C==engine.jtree_struct.root_clq);
+D = engine.in_clq;
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+Ntransient = length(engine.transient);
+trans = cell(Ntransient,1);
+for t=2:T
+ if t==2
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 slice2]);
+ pots = CPDpot(:,t-1:t);
+ else
+ %clqs = [D; engine.clq_ass_to_node(:,2)];
+ clqs = [D engine.jtree_struct.clq_ass_to_node([engine.transient slice2])];
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ for i=1:Ntransient
+ trans{i} = CPDpot{engine.transient(i), t-1};
+ trans{i} = set_domain_pot(trans{i}, domain_pot(trans{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phiC}; trans; CPDpot(:,t)];
+ end
+ [clpot(:,t), seppot(:,:,t)] = init_pot(engine.jtree_struct.cliques, clqs, pots, pot_type, ...
+ find(observed(:,t-1:t)), bnet.node_sizes(:), bnet.cnodes);
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(C);
+
+ phiC = marginalize_pot(clpot{C,t}, engine.interface+ss, engine.maximize);
+end
+
+
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out inferface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+C = engine.in_clq;
+D = engine.out_clq;
+for t=T:-1:2
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(C);
+
+ if t >= 3
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+
+loglik = sum(logscale);
+
+
+%%%%%%%
+function [clpot, seppot] = init_pot(cliques, clqs, pots, pot_type, onodes, ns, cnodes);
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, ns, cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitely initialized to 1
+
+
+%%%%
+function [clpot, seppot] = collect_evidence(clpot, seppot, maximize, postorder, postorder_parents,...
+ separator)
+for n=postorder %postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n}, maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+
+%%%%
+function [clpot, seppot] = distribute_evidence(clpot, seppot, maximize, preorder, preorder_children,...
+ separator)
+for n=preorder
+ for c=preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence4.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence4.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,149 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = ones(1,T); % log(logscale(1)) = 0
+bnet = bnet_from_engine(engine);
+
+slice1 = 1:ss;
+slice2 = slice1+ss;
+Ntransient = length(engine.transient);
+trans = cell(Ntransient,1);
+
+% calibrate each 2-slice jtree in isolation
+for t=2:T
+ if t==2
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 slice2]);
+ pots = CPDpot(:,t-1:t);
+ else
+ clqs = engine.jtree_struct.clq_ass_to_node([engine.transient slice2]);
+ for i=1:Ntransient
+ trans{i} = CPDpot{engine.transient(i), t-1};
+ trans{i} = set_domain_pot(trans{i}, domain_pot(trans{i})-ss); % shift back to slice 1
+ end
+ pots = [ trans; CPDpot(:,t)];
+ end
+  [clpot(:,t), seppot(:,:,t)] = init_pot(engine.jtree_struct.cliques, clqs, pots, pot_type, ...
+ find(observed(:,t-1:t)), bnet.node_sizes(:), bnet.cnodes);
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+end
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in inferface to (t+1,t).
+% Then propagate from D to later slices.
+
+C = engine.out_clq;
+D = engine.in_clq;
+for t=2:T-1
+ phiC = marginalize_pot(clpot{C,t}, engine.interface+ss, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ phiD = marginalize_pot(clpot{D,t+1}, engine.interface, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t+1} = multiply_by_pot(clpot{D,t+1}, ratio);
+
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(1);
+end
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out inferface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+C = engine.in_clq;
+D = engine.out_clq;
+for t=T:-1:2
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(1);
+
+ if t >= 3
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+
+loglik = sum(logscale);
+
+%%%%%%%%%%
+
+function [clpot, seppot] = calibrate(engine, clpot, seppot)
+
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+
+
+%%%%%%%
+function [clpot, seppot] = init_pot(cliques, clqs, pots, pot_type, onodes, ns, cnodes);
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, ns, cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitely initialized to 1
+
+
+%%%%
+function [clpot, seppot] = collect_evidence(clpot, seppot, maximize, postorder, postorder_parents,...
+ separator)
+for n=postorder %postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n}, maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+
+%%%%
+function [clpot, seppot] = distribute_evidence(clpot, seppot, maximize, preorder, preorder_children,...
+ separator)
+for n=preorder
+ for c=preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Broken/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+function marginal = marginal_nodes(engine, nodes, t, fam)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (bk)
+%
+% marginal = marginal_nodes(engine, i, t)
+% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
+%
+% marginal = marginal_nodes(engine, query, t)
+% returns Pr(X(query(1),t), ... X(query(end),t) | Y(1:T)),
+% where 't' specifies the time slice of the earliest node in the query.
+% 'query' cannot span more than 2 time slices.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3.
+
+if nargin < 3, t = 1; end
+if nargin < 4, fam = 0; else fam = 1; end
+
+
+% clpot{t} contains slice t-1 and t
+% Example
+% clpot #: 1 2 3
+% slices: 1 1,2 2,3
+% For filtering, we must take care not to take future evidence into account.
+% For smoothing, clpot{1} does not exist.
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+
+if t < engine.T
+ slice = t+1;
+ nodes2 = nodes;
+else % earliest t is T, so all nodes fit in one slice
+ slice = engine.T;
+ nodes2 = nodes + ss;
+end
+
+c = clq_containing_nodes(engine.jtree_engine, nodes2, fam);
+assert(c >= 1);
+
+%disp(['computing marginal on ' num2str(nodes) ' t = ' num2str(t)]);
+%disp(['using ' num2str(nodes2) ' slice = ' num2str(slice) 'clq = ' num2str(c)]);
+
+bigpot = engine.clpot{c, slice};
+
+pot = marginalize_pot(bigpot, nodes2, engine.maximize);
+marginal = pot_to_marginal(pot);
+
+% we convert the domain to the unrolled numbering system
+% so that update_ess extracts the right evidence.
+marginal.domain = nodes+(t-1)*ss;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_evidence.m/1.1.1.1/Sat Jan 11 18:41:30 2003//
+/enter_soft_evidence.m/1.1.1.1/Thu Feb 19 01:12:08 2004//
+/jtree_dbn_inf_engine.m/1.1.1.1/Thu Nov 14 16:32:00 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Fri Nov 22 23:51:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+A D/Broken////
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@jtree_dbn_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_soft_evidence_nonint.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence_trans.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_dbn_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_dbn_inf_engine1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_dbn_inf_engine2.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@jtree_dbn_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_nonint.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_nonint.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = zeros(1,T);
+bnet = bnet_from_engine(engine);
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in interface to (t+1,t).
+% Then propagate from D to later slices.
+
+C = engine.out_clq;
+assert(C==engine.jtree_struct.root_clq);
+D = engine.in_clq;
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+Nnonint = length(engine.nonint);
+nonint = cell(Nnonint, 1);
+for t=1:T
+ if t==1
+ pots = [CPDpot(:,1); CPDpot(engine.interface, 2)];
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 engine.interface+ss]);
+ obs = find(observed(:,1:2));
+ elseif t==T
+ clqs = [D engine.jtree_struct.clq_ass_to_node(engine.nonint)];
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ for i=1:Nnonint
+ nonint{i} = CPDpot{engine.nonint(i), t};
+ nonint{i} = set_domain_pot(nonint{i}, domain_pot(nonint{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phiC}; nonint];
+ obs = find(observed(:,T));
+ else
+ clqs = [D engine.jtree_struct.clq_ass_to_node([engine.nonint engine.interface+ss])];
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ for i=1:Nnonint
+ nonint{i} = CPDpot{engine.nonint(i), t};
+ nonint{i} = set_domain_pot(nonint{i}, domain_pot(nonint{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phiC}; nonint; CPDpot(engine.interface, t+1)];
+ obs = find(observed(:,t:t+1));
+ end
+ [clpot(:,t), seppot(:,:,t)] = init_pot(engine.jtree_struct.cliques, clqs, pots, pot_type, ...
+ obs, bnet.node_sizes(:), bnet.cnodes);
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(C);
+
+ phiC = marginalize_pot(clpot{C,t}, engine.interface+ss, engine.maximize);
+end
+
+
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out interface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+C = engine.in_clq;
+D = engine.out_clq;
+for t=T:-1:1
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ %logscale(t) = ll(C);
+
+ if t >= 2
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+
+loglik = sum(logscale);
+
+
+%%%%%%%
+function [clpot, seppot] = init_pot(cliques, clqs, pots, pot_type, onodes, ns, cnodes);
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, ns, cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitly initialized to 1
+
+
+%%%%
+function [clpot, seppot] = collect_evidence(clpot, seppot, maximize, postorder, postorder_parents,...
+ separator)
+for n=postorder %postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n}, maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+
+%%%%
+function [clpot, seppot] = distribute_evidence(clpot, seppot, maximize, preorder, preorder_children,...
+ separator)
+for n=preorder
+ for c=preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_trans.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_trans.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type, filter)
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = zeros(1,T);
+bnet = bnet_from_engine(engine);
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in interface to (t+1,t).
+% Then propagate from D to later slices.
+
+C = engine.out_clq;
+assert(C==engine.jtree_struct.root_clq);
+D = engine.in_clq;
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+Ntransient = length(engine.transient);
+trans = cell(Ntransient,1);
+for t=1:T
+ if t==1
+ pots = [CPDpot(:,1); CPDpot(engine.persist, 2)];
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 engine.persist+ss]);
+ obs = find(observed(:,1:2));
+ elseif t==T
+ clqs = [D engine.jtree_struct.clq_ass_to_node(engine.transient)];
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ for i=1:Ntransient
+ trans{i} = CPDpot{engine.transient(i), t};
+ trans{i} = set_domain_pot(trans{i}, domain_pot(trans{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phiC}; trans];
+ obs = find(observed(:,T));
+ else
+ clqs = [D engine.jtree_struct.clq_ass_to_node([engine.transient engine.persist+ss])];
+ phiC = set_domain_pot(phiC, engine.interface); % shift back to slice 1
+ for i=1:Ntransient
+ trans{i} = CPDpot{engine.transient(i), t};
+ trans{i} = set_domain_pot(trans{i}, domain_pot(trans{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phiC}; trans; CPDpot(engine.persist, t+1)];
+ obs = find(observed(:,t:t+1));
+ end
+ [clpot(:,t), seppot(:,:,t)] = init_pot(engine.jtree_struct.cliques, clqs, pots, pot_type, ...
+ obs, bnet.node_sizes(:), bnet.cnodes);
+ [clpot(:,t), seppot(:,:,t)] = collect_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.postorder, ...
+ engine.jtree_struct.postorder_parents,...
+ engine.jtree_struct.separator);
+
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(C);
+
+ phiC = marginalize_pot(clpot{C,t}, engine.interface+ss, engine.maximize);
+end
+
+
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out interface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+C = engine.in_clq;
+D = engine.out_clq;
+for t=T:-1:1
+ [clpot(:,t), seppot(:,:,t)] = distribute_evidence(clpot(:,t), seppot(:,:,t), engine.maximize, ...
+ engine.jtree_struct.preorder, ...
+ engine.jtree_struct.preorder_children, ...
+ engine.jtree_struct.separator);
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ %logscale(t) = ll(C);
+
+ if t >= 2
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+
+loglik = sum(logscale);
+
+
+%%%%%%%
+function [clpot, seppot] = init_pot(cliques, clqs, pots, pot_type, onodes, ns, cnodes);
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, ns, cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitly initialized to 1
+
+
+%%%%
+function [clpot, seppot] = collect_evidence(clpot, seppot, maximize, postorder, postorder_parents,...
+ separator)
+for n=postorder %postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n}, maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+
+%%%%
+function [clpot, seppot] = distribute_evidence(clpot, seppot, maximize, preorder, preorder_children,...
+ separator)
+for n=preorder
+ for c=preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function engine = jtree_dbn_inf_engine(bnet, varargin)
+% JTREE_DBN_INF_ENGINE Junction tree inference algorithm for DBNs.
+
+ss = length(bnet.intra);
+
+onodes = [];
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'observed', onodes = args{i+1};
+ end
+ end
+end
+
+[int, engine.persist, engine.transient] = compute_interface_nodes(bnet.intra, bnet.inter);
+%engine.interface = engine.persist; % WRONG!
+engine.interface = int;
+engine.nonint = mysetdiff(1:ss, int);
+
+if 0
+ % Create a 2 slice jtree
+ % We force there to be cliques containing the in and out interfaces for slices t and t+1.
+ obs_nodes = [onodes(:) onodes(:)+ss];
+ engine.jtree_engine = jtree_inf_engine(bnet, 'observed', obs_nodes(:), ...
+ 'clusters', {int, int+ss}, 'root', int+ss);
+else
+ % Create a "1.5 slice" jtree, containing slice 1 and the interface nodes of slice 2
+ nodes15 = [1:ss int+ss];
+ N = length(nodes15);
+ dag15 = bnet.dag(nodes15, nodes15);
+ ns15 = bnet.node_sizes(nodes15);
+ eclass15 = bnet.equiv_class(nodes15);
+ discrete_bitv = zeros(1,2*ss);
+ discrete_bitv(bnet.dnodes) = 1;
+ discrete15 = find(discrete_bitv(nodes15));
+ bnet15 = mk_bnet(dag15, ns15, 'equiv_class', eclass15, 'discrete', discrete15);
+ bnet15.CPD = bnet.CPD; % CPDs for non-interface nodes in slice 2 will not be used
+ obs_bitv = zeros(1, 2*ss);
+ obs_bitv([onodes onodes+ss]) = 1;
+ obs_nodes15 = find(obs_bitv(nodes15));
+ int_bitv = zeros(1,ss);
+ int_bitv(int) = 1;
+ engine.jtree_engine = jtree_inf_engine(bnet15, 'observed', obs_nodes15(:), ...
+ 'clusters', {int, int+ss}, 'root', int+ss);
+end
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+
+engine.clq_ass_to_node = zeros(ss, 2);
+for i=1:ss
+ engine.clq_ass_to_node(i, 1) = clq_containing_nodes(engine.jtree_engine, i);
+ engine.clq_ass_to_node(i, 2) = clq_containing_nodes(engine.jtree_engine, i+ss);
+end
+
+engine.jtree_struct = struct(engine.jtree_engine); % violate object privacy
+
+% stuff needed by marginal_nodes
+engine.clpot = [];
+engine.maximize = [];
+engine.T = [];
+
+engine = class(engine, 'jtree_dbn_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function engine = jtree_dbn_inf_engine(bnet, varargin)
+% JTREE_DBN_INF_ENGINE Junction tree inference algorithm for DBNs.
+
+ss = length(bnet.intra);
+
+onodes = [];
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'observed', onodes = args{i+1};
+ end
+ end
+end
+
+[int, engine.persist, engine.transient] = compute_interface_nodes(bnet.intra, bnet.inter);
+%engine.interface = engine.persist; % WRONG!
+engine.interface = int;
+engine.nonint = mysetdiff(1:ss, int);
+
+if 1
+ % Create a 2 slice jtree
+ % We force there to be cliques containing the in and out interfaces for slices t and t+1.
+ obs_nodes = [onodes(:) onodes(:)+ss];
+ engine.jtree_engine = jtree_inf_engine(bnet, 'observed', obs_nodes(:), ...
+ 'clusters', {int, int+ss}, 'root', int+ss);
+else
+ % Create a "1.5 slice" jtree, containing slice 1 and the interface nodes of slice 2
+ % To keep the node numbering the same, we simply disconnect the non-interface nodes
+ % from slice 2.
+ intra15 = bnet.intra;
+ for i=engine.nonint(:)'
+ intra15(i,:) = 0;
+ intra15(:,i) = 0;
+ end
+ bnet15 = mk_dbn(intra15, bnet.inter, bnet.node_sizes_slice, bnet.dnodes_slice, ...
+ bnet.equiv_class(:,1), bnet.equiv_class(:,2), bnet.intra);
+ obs_nodes = [onodes(:) onodes(:)+ss];
+ engine.jtree_engine = jtree_inf_engine(bnet15, 'observed', obs_nodes(:), ...
+ 'clusters', {int, int+ss}, 'root', int+ss);
+end
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+
+engine.clq_ass_to_node = zeros(ss, 2);
+for i=1:ss
+ engine.clq_ass_to_node(i, 1) = clq_containing_nodes(engine.jtree_engine, i);
+ engine.clq_ass_to_node(i, 2) = clq_containing_nodes(engine.jtree_engine, i+ss);
+end
+
+engine.jtree_struct = struct(engine.jtree_engine); % violate object privacy
+
+% stuff needed by marginal_nodes
+engine.clpot = [];
+engine.maximize = [];
+engine.T = [];
+
+engine = class(engine, 'jtree_dbn_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function engine = jtree_dbn_inf_engine(bnet, varargin)
+% JTREE_DBN_INF_ENGINE Junction tree inference algorithm for DBNs.
+
+ss = length(bnet.intra);
+
+onodes = [];
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'observed', onodes = args{i+1};
+ end
+ end
+end
+
+[int, engine.persist, engine.transient] = compute_interface_nodes(bnet.intra, bnet.inter);
+%engine.interface = engine.persist; % WRONG!
+engine.interface = int;
+engine.nonint = mysetdiff(1:ss, int);
+
+
+% Create a 2 slice jtree
+% We force there to be cliques containing the in and out interfaces for slices t and t+1.
+obs_nodes = [onodes(:) onodes(:)+ss];
+engine.jtree_engine = jtree_inf_engine(bnet, 'observed', obs_nodes(:), ...
+ 'clusters', {int, int+ss}, 'root', int+ss);
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+engine.jtree_struct = struct(engine.jtree_engine); % violate object privacy
+
+
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, bnet.dnodes, bnet.equiv_class(:,1));
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+
+engine.jtree_engine1 = jtree_inf_engine(bnet1, 'observed', onodes, 'clusters', {int}, ...
+ 'root', int);
+
+engine.in_clq1 = clq_containing_nodes(engine.jtree_engine1, int);
+engine.jtree_struct1 = struct(engine.jtree_engine1); % violate object privacy
+
+
+
+
+% stuff needed by marginal_nodes
+engine.clpot = [];
+engine.T = [];
+engine.maximize = [];
+
+engine = class(engine, 'jtree_dbn_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree_dbn)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [engine.maximize]
+% softCPDpot{n,t} - use soft potential for node n instead of its CPD; set to [] to use CPD
+% soft_evidence_nodes(i,1:2) = [n t] means the i'th piece of soft evidence is on node n in slice t
+% soft_evidence{i} - prob distribution over values for soft_evidence_nodes(i,:)
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+
+% for add_ev in marginal_nodes
+T = size(evidence, 2);
+engine.evidence = evidence;
+bnet = bnet_from_engine(engine);
+ss = length(bnet.node_sizes_slice);
+ns = bnet.node_sizes_slice(:);
+engine.node_sizes = repmat(ns, [1 T]);
+softCPDpot = cell(ss,T);
+soft_evidence = {};
+soft_evidence_nodes = [];
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', engine.maximize = args{i+1};
+ case 'softCPDpot', softCPDpot = args{i+1};
+ case 'soft_evidence', soft_evidence = args{i+1};
+ case 'soft_evidence_nodes', soft_evidence_nodes = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+engine.jtree_engine = set_fields(engine.jtree_engine, 'maximize', engine.maximize);
+engine.jtree_engine1 = set_fields(engine.jtree_engine1, 'maximize', engine.maximize);
+
+[ss T] = size(evidence);
+engine.T = T;
+observed_bitv = ~isemptycell(evidence);
+onodes = find(observed_bitv);
+pot_type = determine_pot_type(bnet, onodes);
+CPDpot = convert_dbn_CPDs_to_pots(bnet, evidence, pot_type, softCPDpot);
+
+if ~isempty(soft_evidence_nodes)
+ nsoft = size(soft_evidence_nodes,1);
+ for i=1:nsoft
+ n = soft_evidence_nodes(i,1);
+ t = soft_evidence_nodes(i,2);
+ if t==1
+ dom = n;
+ else
+ dom = n+ss;
+ end
+ pot = dpot(dom, ns(n), soft_evidence{i});
+ CPDpot{n,t} = multiply_by_pot(CPDpot{n,t}, pot);
+ end
+end
+
+[engine.clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed_bitv, pot_type);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,126 @@
+function [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified soft evidence to the network (jtree_dbn)
+% [clpot, loglik] = enter_soft_evidence(engine, CPDpot, observed, pot_type)
+
+scale = 1;
+verbose = 0;
+
+[ss T] = size(CPDpot);
+Q = length(engine.jtree_struct.cliques);
+clpot = cell(Q,T); % clpot{t} contains evidence from slices (t-1, t)
+seppot = cell(Q,Q,T);
+ll = zeros(1,Q);
+logscale = zeros(1,T);
+bnet = bnet_from_engine(engine);
+root = engine.jtree_struct.root_clq;
+
+% Forwards pass.
+% Compute distribution on clq C,
+% where C is the out interface to (t-1,t).
+% Then pass this to clq D, where D is the in interface to (t+1,t).
+
+% Then propagate from D to later slices.
+
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+transient = engine.transient;
+persist = engine.persist;
+Ntransient = length(transient);
+trans = cell(Ntransient,1);
+if verbose, fprintf('forward pass\n'); end
+for t=1:T
+ if verbose, fprintf('%d ', t); end
+ if t==1
+ pots = [CPDpot(:,1); CPDpot(persist, 2)];
+ clqs = engine.jtree_struct.clq_ass_to_node([slice1 persist+ss]);
+ obs = find(observed(:,1:2));
+ elseif t==T
+ clqs = [engine.in_clq1 engine.jtree_struct1.clq_ass_to_node(transient)];
+ phi = set_domain_pot(phi, engine.interface); % shift back to slice 1
+ for i=1:Ntransient
+ trans{i} = CPDpot{transient(i), t};
+ trans{i} = set_domain_pot(trans{i}, domain_pot(trans{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phi}; trans];
+ obs = find(observed(:,T));
+ else
+ clqs = [engine.in_clq engine.jtree_struct.clq_ass_to_node([transient persist+ss])];
+ phi = set_domain_pot(phi, engine.interface); % shift back to slice 1
+ for i=1:Ntransient
+ trans{i} = CPDpot{transient(i), t};
+ trans{i} = set_domain_pot(trans{i}, domain_pot(trans{i})-ss); % shift back to slice 1
+ end
+ pots = [ {phi}; trans; CPDpot(persist, t+1)];
+ obs = find(observed(:,t:t+1));
+ end
+
+ if t < T
+ [clpot(1:Q,t), seppot(1:Q,1:Q,t)] = init_pot(engine.jtree_engine, clqs, pots, pot_type, obs);
+ [clpot(1:Q,t), seppot(1:Q,1:Q,t)] = collect_evidence(engine.jtree_engine, clpot(1:Q,t), seppot(1:Q,1:Q,t));
+ else
+ Q = length(engine.jtree_struct1.cliques);
+ root = engine.jtree_struct1.root_clq;
+ [clpot(1:Q,t), seppot(1:Q,1:Q,t)] = init_pot(engine.jtree_engine1, clqs, pots, pot_type, obs);
+ [clpot(1:Q,t), seppot(1:Q,1:Q,t)] = collect_evidence(engine.jtree_engine1, clpot(1:Q,t), seppot(1:Q,1:Q,t));
+ end
+
+
+ if scale
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ logscale(t) = ll(root);
+ end
+
+ if t < T
+ % bug fix by Bob Welch 30 Jan 04
+ phi = marginalize_pot(clpot{engine.out_clq,t}, engine.interface+ss,engine.maximize);
+ %phi = marginalize_pot(clpot{root,t}, engine.interface+ss, engine.maximize);
+ end
+end
+
+if scale
+loglik = sum(logscale);
+else
+loglik = [];
+end
+
+
+% Backwards pass.
+% Pass evidence from clq C to clq D,
+% where C is the in interface to (t,t+1) and D is the out interface to (t-1,t)
+% Then propagate evidence from D to earlier slices.
+% (C and D are reversed names from the tech report!)
+D = engine.out_clq;
+if verbose, fprintf('\nbackwards pass\n'); end
+for t=T:-1:1
+ if verbose, fprintf('%d ', t); end
+
+ if t == T
+ Q = length(engine.jtree_struct1.cliques);
+ C = engine.in_clq1;
+ [clpot(1:Q,t), seppot(1:Q,1:Q,t)] = distribute_evidence(engine.jtree_engine1, clpot(1:Q,t), seppot(1:Q,1:Q,t));
+ else
+ Q = length(engine.jtree_struct.cliques);
+ C = engine.in_clq;
+ [clpot(1:Q,t), seppot(1:Q,1:Q,t)] = distribute_evidence(engine.jtree_engine, clpot(1:Q,t), seppot(1:Q,1:Q,t));
+ end
+
+ if scale
+ for c=1:Q
+ [clpot{c,t}, ll(c)] = normalize_pot(clpot{c,t});
+ end
+ end
+
+ if t >= 2
+ phiC = marginalize_pot(clpot{C,t}, engine.interface, engine.maximize);
+ phiC = set_domain_pot(phiC, engine.interface+ss); % shift forward to slice 2
+ phiD = marginalize_pot(clpot{D,t-1}, engine.interface+ss, engine.maximize);
+ ratio = divide_by_pot(phiC, phiD);
+ clpot{D,t-1} = multiply_by_pot(clpot{D,t-1}, ratio);
+ end
+end
+if verbose, fprintf('\n'); end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/jtree_dbn_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/jtree_dbn_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,109 @@
+function engine = jtree_dbn_inf_engine(bnet, varargin)
+% JTREE_DBN_INF_ENGINE Junction tree inference algorithm for DBNs.
+% engine = jtree_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% clusters - specifies variables that must be grouped in the 1.5 slice DBN
+% maximize - 1 means max-product, 0 means sum-product [0]
+%
+% e.g., engine = jtree_dbn_inf_engine(dbn, 'clusters', {[1 2]});
+%
+% This uses all of slice t-1 plus the backwards interface of slice t.
+% By contrast, jtree_2TBN_inf_engine in the online directory uses
+% the forwards interface of slice t-1 plus all of slice t.
+% See my thesis for details.
+
+ss = length(bnet.intra);
+
+engine.maximize = 0;
+clusters = {};
+
+args = varargin;
+for i=1:2:length(args)
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+
+engine.evidence = [];
+engine.node_sizes = [];
+
+[int, engine.persist, engine.transient] = compute_interface_nodes(bnet.intra, bnet.inter);
+engine.interface = int;
+engine.nonint = mysetdiff(1:ss, int);
+
+onodes = bnet.observed;
+
+if 0
+ % Create a 2 slice jtree
+ % We force there to be cliques containing the in and out interfaces for slices t and t+1.
+ obs_nodes = [onodes(:) onodes(:)+ss];
+ engine.jtree_engine = jtree_inf_engine(bnet, 'observed', obs_nodes(:), ...
+ 'clusters', {int, int+ss}, 'root', int+ss);
+else
+ % Create a "1.5 slice" jtree, containing slice 1 and the interface nodes of slice 2
+ % To keep the node numbering the same, we simply disconnect the non-interface nodes
+ % from slice 2, and set their size to 1.
+ % We do this to speed things up, and so that the likelihood is computed correctly - we do not need to do
+ % this if we just want to compute marginals.
+ intra15 = bnet.intra;
+ for i=engine.nonint(:)'
+ intra15(i,:) = 0;
+ intra15(:,i) = 0;
+ end
+ dag15 = [bnet.intra bnet.inter;
+ zeros(ss) intra15];
+ ns = bnet.node_sizes(:);
+ ns(engine.nonint+ss) = 1; % disconnected nodes get size 1
+ obs_nodes = [onodes(:) onodes(:)+ss];
+ bnet15 = mk_bnet(dag15, ns, 'discrete', bnet.dnodes, 'equiv_class', bnet.equiv_class(:), ...
+ 'observed', obs_nodes(:));
+
+ %bnet15 = mk_dbn(intra15, bnet.inter, bnet.node_sizes_slice, bnet.dnodes_slice, ...
+ % bnet.equiv_class(:,1), bnet.equiv_class(:,2), bnet.intra);
+ % with the dbn, we can't independently control the sizes of slice 2 nodes
+
+ if 1
+ % use unconstrained elimination,
+ % but force there to be a clique containing both interfaces
+ clusters(end+1:end+2) = {int, int+ss};
+ engine.jtree_engine = jtree_inf_engine(bnet15, 'clusters', clusters, 'root', int+ss);
+ else
+ % Use constrained elimination - this induces a clique that contain the 2nd interface,
+ % but not the first.
+ % Hence we throw in the first interface as an extra.
+ stages = {1:ss, [1:ss]+ss};
+ clusters(end+1:end+2) = {int, int+ss};
+ engine.jtree_engine = jtree_inf_engine(bnet15, 'clusters', clusters, ...
+ 'stages', stages, 'root', int+ss);
+ end
+end
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+engine.jtree_struct = struct(engine.jtree_engine); % violate object privacy
+
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes,1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+
+engine.jtree_engine1 = jtree_inf_engine(bnet1, 'clusters', {int}, 'root', int);
+
+engine.in_clq1 = clq_containing_nodes(engine.jtree_engine1, int);
+engine.jtree_struct1 = struct(engine.jtree_engine1); % violate object privacy
+
+% stuff needed by marginal_nodes
+engine.clpot = [];
+engine.T = [];
+
+engine = class(engine, 'jtree_dbn_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function m = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_dbn)
+% marginal = marginal_family(engine, i, t)
+
+% This is just like inf_engine/marginal_family, except when we call
+% marginal_nodes, we provide a 4th argument, to tell it's a family.
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+bnet = bnet_from_engine(engine);
+if t==1
+ m = marginal_nodes(engine, family(bnet.dag, i), t, add_ev, 1);
+else
+ ss = length(bnet.intra);
+ fam = family(bnet.dag, i+ss);
+ if any(fam<=ss)
+ % i has a parent in the preceeding slice
+ % Hence the lowest numbered slice containing the family is t-1
+ m = marginal_nodes(engine, fam, t-1, add_ev, 1);
+ else
+ % The family all fits inside slice t
+ % Hence shift the indexes back to slice 1
+ m = marginal_nodes(engine, fam-ss, t, add_ev, 1);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_dbn_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev, fam)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (bk)
+%
+% marginal = marginal_nodes(engine, i, t)
+% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
+%
+% marginal = marginal_nodes(engine, query, t)
+% returns Pr(X(query(1),t), ... X(query(end),t) | Y(1:T)),
+% where 't' specifies the time slice of the earliest node in the query.
+% 'query' cannot span more than 2 time slices.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3.
+%
+% marginal = marginal_nodes(engine, nodes, t, add_ev, fam)
+% add_ev is an optional argument; if 1, we will "inflate" the marginal of observed nodes
+% to their original size, adding 0s to the positions which contradict the evidence
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+if nargin < 5, fam = 0; end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+
+if t==1 | t==engine.T
+ slice = t;
+ nodes2 = nodes;
+elseif mysubset(nodes, engine.persist)
+ slice = t-1;
+ nodes2 = nodes+ss;
+else
+ slice = t;
+ nodes2 = nodes;
+end
+
+%disp(['computing marginal on ' num2str(nodes) ' t = ' num2str(t) ' fam = ' num2str(fam)]);
+
+if t==engine.T
+ c = clq_containing_nodes(engine.jtree_engine1, nodes2, fam);
+else
+ c = clq_containing_nodes(engine.jtree_engine, nodes2, fam);
+end
+if c == -1
+ error(['no clique contains ' nodes2])
+end
+
+
+%disp(['using ' num2str(nodes2) ' slice = ' num2str(slice) ' clq = ' num2str(c)]);
+
+bigpot = engine.clpot{c, slice};
+
+pot = marginalize_pot(bigpot, nodes2, engine.maximize);
+%pot = normalize_pot(pot);
+marginal = pot_to_marginal(pot);
+
+
+% we convert the domain to the unrolled numbering system
+% so that add_ev_to_dmarginal (maybe called in update_ess) extracts the right evidence.
+marginal.domain = nodes+(t-1)*ss;
+
+if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_unrolled_dbn_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function marginal = marginal_family(engine, i, t)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_unrolled_dbn)
+% marginal = marginal_family(engine, i, t)
+
+if nargin < 3, t = 1; end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+marginal = marginal_family(engine.sub_engine, i + (t-1)*ss);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function marginal = marginal_nodes(engine, nodes, t)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (jtree_unrolled_dbn)
+% marginal = marginal_nodes(engine, nodes, t)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' must occur in some clique.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+query = nodes + (t-1)*ss;
+marginal = marginal_nodes(engine.sub_engine, query);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree_unrolled_dbn)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+% filter - if 1, does filtering (not supported), else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+if filter
+ error('jtree_unrolled_dbn does not support filtering')
+end
+
+if size(evidence,2) ~= engine.nslices
+ error(['engine was created assuming there are ' num2str(engine.nslices) ...
+ ' slices, but evidence has ' num2str(size(evidence,2))])
+end
+
+[engine.unrolled_engine, loglik] = enter_evidence(engine.unrolled_engine, evidence, 'maximize', maximize);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/jtree_unrolled_dbn_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/jtree_unrolled_dbn_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function engine = jtree_unrolled_dbn_inf_engine(bnet, T, varargin)
+% JTREE_UNROLLED_DBN_INF_ENGINE Unroll the DBN for T time-slices and apply jtree to the resulting static net
+% engine = jtree_unrolled_dbn_inf_engine(bnet, T, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% useC - 1 means use jtree_C_inf_engine instead of jtree_inf_engine [0]
+% constrained - 1 means we constrain ourselves to eliminate slice t before t+1 [1]
+%
+% e.g., engine = jtree_unrolled_dbn_inf_engine(bnet, T, 'useC', 1);
+
+% set default params
+N = length(bnet.intra);
+useC = 0;
+constrained = 1;
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ if isstr(args{1})
+ for i=1:2:nargs
+ switch args{i},
+ case 'useC', useC = args{i+1};
+ case 'constrained', constrained = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ else
+ error(['invalid argument name ' args{1}]);
+ end
+end
+
+bnet2 = dbn_to_bnet(bnet, T);
+ss = length(bnet.intra);
+engine.ss = ss;
+
+% If constrained_order = 1 we constrain ourselves to eliminate slice t before t+1.
+% This prevents cliques containing nodes from far-apart time-slices.
+if constrained
+ stages = num2cell(unroll_set(1:ss, ss, T), 1);
+else
+ stages = { 1:length(bnet2.dag) };
+end
+if useC
+ jengine = jtree_C_inf_engine(bnet2, 'stages', stages);
+else
+ jengine = jtree_inf_engine(bnet2, 'stages', stages);
+end
+
+engine.unrolled_engine = jengine;
+% we don't inherit from jtree_inf_engine, because that would only store bnet2,
+% and we would lose access to the DBN-specific fields like intra/inter
+
+engine.nslices = T;
+engine = class(engine, 'jtree_unrolled_dbn_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_unrolled_dbn)
+% marginal = marginal_family(engine, i, t)
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+assert(~add_ev);
+
+%marginal = marginal_family(engine.unrolled_engine, i + (t-1)*engine.ss, add_ev);
+marginal = marginal_family(engine.unrolled_engine, i + (t-1)*engine.ss);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (loopy_unrolled_dbn)
+% marginal = marginal_nodes(engine, nodes, t)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' must occur in some clique.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+marginal = marginal_nodes(engine.unrolled_engine, nodes + (t-1)*engine.ss, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@jtree_unrolled_dbn_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (jtree_unrolled_dbn)
+% engine = update_engine(engine, newCPDs)
+
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+engine.unrolled_engine = update_engine(engine.unrolled_engine, newCPDs);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/kalman_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@kalman_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,83 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (kalman)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (same as sum-product for Gaussians!), else sum-product [0]
+% filter - if 1, do filtering, else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+assert(~maximize);
+
+bnet = bnet_from_engine(engine);
+n = length(bnet.intra);
+onodes = bnet.observed;
+hnodes = mysetdiff(1:n, onodes);
+T = size(evidence, 2);
+ns = bnet.node_sizes;
+O = sum(ns(onodes));
+data = reshape(cat(1, evidence{onodes,:}), [O T]);
+
+A = engine.trans_mat;
+C = engine.obs_mat;
+Q = engine.trans_cov;
+R = engine.obs_cov;
+init_x = engine.init_state;
+init_V = engine.init_cov;
+
+if filter
+ [x, V, VV, loglik] = kalman_filter(data, A, C, Q, R, init_x, init_V);
+else
+ [x, V, VV, loglik] = kalman_smoother(data, A, C, Q, R, init_x, init_V);
+end
+
+
+% Wrap the posterior inside a potential, so it can be marginalized easily
+engine.one_slice_marginal = cell(1,T);
+engine.two_slice_marginal = cell(1,T);
+ns(onodes) = 0;
+ns(onodes+n) = 0;
+ss = length(bnet.intra);
+for t=1:T
+ dom = (1:n);
+ engine.one_slice_marginal{t} = mpot(dom+(t-1)*ss, ns(dom), 1, x(:,t), V(:,:,t));
+end
+% for t=1:T-1
+% dom = (1:(2*n));
+% mu = [x(:,t); x(:,t)];
+% Sigma = [V(:,:,t) VV(:,:,t+1)';
+% VV(:,:,t+1) V(:,:,t+1)];
+% engine.two_slice_marginal{t} = mpot(dom+(t-1)*ss, ns(dom), 1, mu, Sigma);
+% end
+for t=2:T
+ %dom = (1:(2*n));
+ current_slice = hnodes;
+ next_slice = hnodes + ss;
+ dom = [current_slice next_slice];
+ mu = [x(:,t-1); x(:,t)];
+ Sigma = [V(:,:,t-1) VV(:,:,t)';
+ VV(:,:,t) V(:,:,t)];
+ engine.two_slice_marginal{t-1} = mpot(dom+(t-2)*ss, ns(dom), 1, mu, Sigma);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/kalman_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/kalman_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function engine = kalman_inf_engine(bnet)
+% KALMAN_INF_ENGINE Inference engine for Linear-Gaussian state-space models.
+% engine = kalman_inf_engine(bnet)
+%
+% 'onodes' specifies which nodes are observed; these must be leaves.
+% The remaining nodes are all hidden. All nodes must have linear-Gaussian CPDs.
+% The hidden nodes must be persistent, i.e., they must have children in
+% the next time slice. In addition, they may not have any children within the current slice,
+% except to the observed leaves. In other words, the topology must be isomorphic to a standard LDS.
+%
+% There are many derivations of the filtering and smoothing equations for Linear Dynamical
+% Systems in the literature. I particularly like the following
+% - "From HMMs to LDSs", T. Minka, MIT Tech Report, (no date), available from
+% ftp://vismod.www.media.mit.edu/pub/tpminka/papers/minka-lds-tut.ps.gz
+
+[engine.trans_mat, engine.trans_cov, engine.obs_mat, engine.obs_cov, engine.init_state, engine.init_cov] = ...
+ dbn_to_lds(bnet);
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.one_slice_marginal = [];
+engine.two_slice_marginal = [];
+
+engine = class(engine, 'kalman_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function marginal = marginal_nodes(engine, nodes, t)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (kalman)
+% marginal = marginal_nodes(engine, nodes, t)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' cannot span more than 2 time slices.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+if all(nodes <= ss)
+ bigpot = engine.one_slice_marginal{t};
+else
+ bigpot = engine.two_slice_marginal{t};
+end
+
+nodes = nodes + (t-1)*ss;
+pot = marginalize_pot(bigpot, nodes);
+marginal = pot_to_marginal(pot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/dbn_to_lds.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/extract_params_from_gbn.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@kalman_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/dbn_to_lds.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/dbn_to_lds.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function [trans_mat, trans_cov, obs_mat, obs_cov, init_state, init_cov] = dbn_to_lds(bnet)
+% DBN_TO_LDS Compute the Linear Dynamical System parameters from the Gaussian DBN.
+% [trans_mat, trans_cov, obs_mat, obs_cov, init_state, init_cov] = dbn_to_lds(bnet)
+
+onodes = bnet.observed;
+ss = length(bnet.intra);
+num_nodes = ss*2;
+assert(isequal(bnet.cnodes_slice, 1:ss));
+[W,D,mu] = extract_params_from_gbn(bnet);
+
+hnodes = mysetdiff(1:ss, onodes);
+bs = bnet.node_sizes(:); % block sizes
+
+obs_mat = W(block(hnodes,bs), block(onodes,bs))';
+u = block(onodes,bs);
+obs_cov = D(u,u);
+
+trans_mat = W(block(hnodes,bs), block(hnodes + ss, bs))';
+u = block(hnodes + ss, bs);
+trans_cov = D(u,u);
+
+u = block(hnodes,bs);
+init_cov = D(u,u);
+init_state = mu(u);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/extract_params_from_gbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/private/extract_params_from_gbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function [B,D,mu] = extract_params_from_gbn(bnet)
+% Extract all the local parameters of each Gaussian node, and collect them into global matrices.
+% [B,D,mu] = extract_params_from_gbn(bnet)
+%
+% B(i,j) is a block matrix that contains the transposed weight matrix from node i to node j.
+% D(i,i) is a block matrix that contains the noise covariance matrix for node i.
+% mu(i) is a block vector that contains the shifted noise mean for node i.
+
+% In Shachter's model, the mean of each node in the global gaussian is
+% the same as the node's local unconditional mean.
+% In Alag's model (which we use), the global mean gets shifted.
+
+
+num_nodes = length(bnet.dag);
+bs = bnet.node_sizes(:); % bs = block sizes
+N = sum(bs); % num scalar nodes
+
+B = zeros(N,N);
+D = zeros(N,N);
+mu = zeros(N,1);
+
+for i=1:num_nodes % in topological order
+ ps = parents(bnet.dag, i);
+ e = bnet.equiv_class(i);
+ %[m, Sigma, weights] = extract_params_from_CPD(bnet.CPD{e});
+ s = struct(bnet.CPD{e}); % violate privacy of object
+ m = s.mean; Sigma = s.cov; weights = s.weights;
+ if length(ps) == 0
+ mu(block(i,bs)) = m;
+ else
+ mu(block(i,bs)) = m + weights * mu(block(ps,bs));
+ end
+ B(block(ps,bs), block(i,bs)) = weights';
+ D(block(i,bs), block(i,bs)) = Sigma;
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@kalman_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (kalman)
+% engine = update_engine(engine, newCPDs)
+
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+[engine.trans_mat, engine.trans_cov, engine.obs_mat, engine.obs_cov, engine.init_state, engine.init_cov] = ...
+ dbn_to_lds(bnet_from_engine(engine));
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_ev.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/pearl_dbn_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/Old////
+D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@pearl_dbn_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/correct_smooth.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/filter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/filter_evidence_obj_oriented.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/smooth_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/smooth_evidence_fast.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/wrong_smooth.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@pearl_dbn_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/correct_smooth.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/correct_smooth.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,244 @@
+function [marginal, msg, loglik] = smooth_evidence(engine, evidence)
+% [marginal, msg, loglik] = smooth_evidence(engine, evidence) (pearl_dbn)
+
+disp('warning: broken');
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+bnet2 = dbn_to_bnet(bnet, T);
+ns = bnet2.node_sizes;
+hnodes = mysetdiff(1:ss, engine.onodes);
+hnodes = hnodes(:)';
+
+onodes2 = unroll_set(engine.onodes(:), ss, T);
+onodes2 = onodes2(:)';
+
+hnodes2 = unroll_set(hnodes(:), ss, T);
+hnodes2 = hnodes2(:)';
+
+[engine.parent_index, engine.child_index] = mk_pearl_msg_indices(bnet2);
+
+msg = init_msgs(bnet2.dag, ns, evidence, bnet2.equiv_class, bnet2.CPD);
+
+verbose = 0;
+
+niter = 1;
+for iter=1:niter
+ % FORWARD
+ for t=1:T
+ if verbose, fprintf('t=%d\n', t); end
+ % observed leaves send lambda to parents
+ for i=engine.onodes(:)'
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ for p=ps(:)'
+ j = engine.child_index{p}(n); % n is p's j'th child
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ else
+ e = bnet.equiv_class(i, 1);
+ end
+ lam_msg = normalise(compute_lambda_msg(bnet.CPD{e}, n, ps, msg, p));
+ msg{p}.lambda_from_child{j} = lam_msg;
+ if verbose, fprintf('%d sends lambda to %d\n', n, p); disp(lam_msg); end
+ end
+ end
+
+ % update pi
+ for i=hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ if t==1
+ e = bnet.equiv_class(i,1);
+ else
+ e = bnet.equiv_class(i,2);
+ end
+ msg{n}.pi = compute_pi(bnet.CPD{e}, n, ps, msg);
+ if verbose, fprintf('%d computes pi\n', n); disp(msg{n}.pi); end
+ end
+
+ % send pi msg to children
+ for i=hnodes
+ n = i + (t-1)*ss;
+ %cs = myintersect(children(bnet2.dag, n), hnodes2);
+ cs = children(bnet2.dag, n);
+ for c=cs(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ pi_msg = normalise(compute_pi_msg(n, cs, msg, c, ns));
+ msg{c}.pi_from_parent{j} = pi_msg;
+ if verbose, fprintf('%d sends pi to %d\n', n, c); disp(pi_msg); end
+ end
+ end
+ end
+
+ % BACKWARD
+ for t=T:-1:1
+ if verbose, fprintf('t = %d\n', t); end
+ % update lambda
+ for i=hnodes
+ n = i + (t-1)*ss;
+ cs = children(bnet2.dag, n);
+ msg{n}.lambda = compute_lambda(n, cs, msg, ns);
+ if verbose, fprintf('%d computes lambda\n', n); disp(msg{n}.lambda); end
+ end
+ % send lambda msgs to parents
+ for i=hnodes
+ n = i + (t-1)*ss;
+ %ps = myintersect(parents(bnet2.dag, n), hnodes2);
+ ps = parents(bnet2.dag, n);
+ for p=ps(:)'
+ j = engine.child_index{p}(n); % n is p's j'th child
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ else
+ e = bnet.equiv_class(i, 1);
+ end
+ lam_msg = normalise(compute_lambda_msg(bnet.CPD{e}, n, ps, msg, p));
+ msg{p}.lambda_from_child{j} = lam_msg;
+ if verbose, fprintf('%d sends lambda to %d\n', n, p); disp(lam_msg); end
+ end
+ end
+ end
+
+end
+
+
+marginal = cell(ss,T);
+lik = zeros(1,ss*T);
+for t=1:T
+ for i=1:ss
+ n = i + (t-1)*ss;
+ [bel, lik(n)] = normalise(msg{n}.pi .* msg{n}.lambda);
+ marginal{i,t} = bel;
+ end
+end
+
+loglik = sum(log(lik));
+
+
+
+%%%%%%%
+
+function lambda = compute_lambda(n, cs, msg, ns)
+% Pearl p183 eq 4.50
+lambda = prod_lambda_msgs(n, cs, msg, ns);
+
+%%%%%%%
+
+function pi_msg = compute_pi_msg(n, cs, msg, c, ns)
+% Pearl p183 eq 4.53 and 4.51
+pi_msg = msg{n}.pi .* prod_lambda_msgs(n, cs, msg, ns, c);
+
+%%%%%%%%%
+
+function lam = prod_lambda_msgs(n, cs, msg, ns, except)
+
+if nargin < 5, except = -1; end
+
+lam = msg{n}.lambda_from_self(:);
+lam = ones(ns(n), 1);
+for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ lam = lam .* msg{n}.lambda_from_child{i};
+ end
+end
+
+
+%%%%%%%%%
+
+function msg = init_msgs(dag, ns, evidence, eclass, CPD)
+% INIT_MSGS Initialize the lambda/pi message and state vectors (pearl_dbn)
+% msg = init_msgs(dag, ns, evidence)
+
+N = length(dag);
+msg = cell(1,N);
+observed = ~isemptycell(evidence(:));
+
+for n=1:N
+ ps = parents(dag, n);
+ msg{n}.pi_from_parent = cell(1, length(ps));
+ for i=1:length(ps)
+ p = ps(i);
+ msg{n}.pi_from_parent{i} = ones(ns(p), 1);
+ end
+
+ cs = children(dag, n);
+ msg{n}.lambda_from_child = cell(1, length(cs));
+ for i=1:length(cs)
+ c = cs(i);
+ msg{n}.lambda_from_child{i} = ones(ns(n), 1);
+ end
+
+ msg{n}.lambda = ones(ns(n), 1);
+ msg{n}.lambda_from_self = ones(ns(n), 1);
+ msg{n}.pi = ones(ns(n), 1);
+
+ % Initialize the lambdas with any evidence
+ if observed(n)
+ v = evidence{n};
+ %msg{n}.lambda_from_self = zeros(ns(n), 1);
+ %msg{n}.lambda_from_self(v) = 1; % delta function
+ msg{n}.lambda = zeros(ns(n), 1);
+ msg{n}.lambda(v) = 1; % delta function
+ end
+
+end
+
+
+%%%%%%%%
+
+function msg = init_ev_msgs(engine, evidence, msg)
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+pot_type = 'd';
+t = 1;
+hnodes = mysetdiff(1:ss, engine.onodes);
+for i=engine.onodes(:)'
+ fam = family(bnet.dag, i);
+ e = bnet.equiv_class(i, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ temp = pot_to_marginal(CPDpot);
+ msg{i}.lambda_from_self = temp.T;
+end
+for t=2:T
+ for i=engine.onodes(:)'
+ fam = family(bnet.dag, i, 2); % extract from slice t
+ e = bnet.equiv_class(i, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ temp = pot_to_marginal(CPDpot);
+ n = i + (t-1)*ss;
+ msg{n}.lambda_from_self = temp.T;
+ end
+end
+
+
+%%%%%%%%%%%
+
+function msg = init_ev_msgs2(engine, evidence, msg)
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+pot_type = 'd';
+t = 1;
+hnodes = mysetdiff(1:ss, engine.onodes);
+for i=engine.onodes(:)'
+ fam = family(bnet.dag, i);
+ e = bnet.equiv_class(i, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ temp = pot_to_marginal(CPDpot);
+ msg{i}.lambda_from_self = temp.T;
+end
+for t=2:T
+ for i=engine.onodes(:)'
+ fam = family(bnet.dag, i, 2); % extract from slice t
+ e = bnet.equiv_class(i, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ temp = pot_to_marginal(CPDpot);
+ n = i + (t-1)*ss;
+ msg{n}.lambda_from_self = temp.T;
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,123 @@
+function [engine, loglik] = enter_evidence(engine, evidence, filter)
+% ENTER_EVIDENCE Add the specified evidence to the network (pearl_dbn)
+% [engine, loglik] = enter_evidence(engine, evidence, filter)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+% If filter = 1, we do filtering, otherwise smoothing (default).
+
+if nargin < 3, filter = 0; end
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+bnet2 = dbn_to_bnet(bnet, T);
+ns = bnet2.node_sizes;
+hnodes = mysetdiff(1:ss, engine.onodes);
+hnodes = hnodes(:)';
+
+[engine.parent_index, engine.child_index] = mk_pearl_msg_indices(bnet2);
+
+msg = init_msgs(bnet2.dag, ns, evidence);
+msg = init_ev_msgs(engine, evidence, msg);
+
+niter = 1;
+for iter=1:niter
+ % FORWARD
+ for t=1:T
+ % update pi
+ for i=1:ss %hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ if t==1
+ e = bnet.equiv_class(i,1);
+ else
+ e = bnet.equiv_class(i,2);
+ end
+ msg{n}.pi = compute_pi(bnet.CPD{e}, n, ps, msg);
+ %msg{n}.pi = normalise(msg{n}.pi(:) .* msg{n}.lambda_from_self(:));
+ end
+ % send pi msg to children
+ for i=1:ss % hnodes
+ n = i + (t-1)*ss;
+ cs = children(bnet2.dag, n);
+ for c=cs(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ msg{c}.pi_from_parent{j} = normalise(compute_pi_msg(n, cs, msg, c, ns));
+ end
+ end
+ end
+
+ if filter
+ disp('skipping smoothing');
+ break;
+ end
+
+ % BACKWARD
+ for t=T:-1:1
+ % update lambda
+ for i=1:ss % hnodes
+ n = i + (t-1)*ss;
+ cs = children(bnet2.dag, n);
+ msg{n}.lambda = compute_lambda(n, cs, msg, ns);
+ end
+ % send lambda msgs to parents
+ for i=1:ss % hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ for p=ps(:)'
+ j = engine.child_index{p}(n); % n is p's j'th child
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ else
+ e = bnet.equiv_class(i, 1);
+ end
+ msg{p}.lambda_from_child{j} = normalise(compute_lambda_msg(bnet.CPD{e}, n, ps, msg, p));
+ end
+ end
+ end
+
+end
+
+
+engine.marginal = cell(ss,T);
+lik = zeros(1,ss*T);
+for t=1:T
+ for i=1:ss
+ n = i + (t-1)*ss;
+ [bel, lik(n)] = normalise(msg{n}.pi .* msg{n}.lambda);
+ engine.marginal{i,t} = bel;
+ end
+end
+
+engine.evidence = evidence; % needed by marginal_nodes and marginal_family
+engine.msg = msg; % needed by marginal_family
+loglik = sum(log(lik));
+
+
+
+%%%%%%%
+
+function lambda = compute_lambda(n, cs, msg, ns)
+% Pearl p183 eq 4.50
+lambda = prod_lambda_msgs(n, cs, msg, ns);
+
+%%%%%%%
+
+function pi_msg = compute_pi_msg(n, cs, msg, c, ns)
+% Pearl p183 eq 4.53 and 4.51
+pi_msg = msg{n}.pi .* prod_lambda_msgs(n, cs, msg, ns, c);
+
+%%%%%%%%%
+
+function lam = prod_lambda_msgs(n, cs, msg, ns, except)
+
+if nargin < 5, except = -1; end
+
+lam = msg{n}.lambda_from_self(:);
+%lam = ones(ns(n), 1);
+for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ lam = lam .* msg{n}.lambda_from_child{i};
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,146 @@
+function [marginal, msg, loglik] = filter_evidence(engine, evidence)
+
+error('broken');
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+onodes = engine.onodes;
+hnodes = mysetdiff(1:ss, onodes);
+hnodes = hnodes(:)';
+
+ns = bnet.node_sizes(:);
+onodes2 = [onodes(:); onodes(:)+ss];
+ns(onodes2) = 1;
+
+verbose = 1;
+if verbose, fprintf('\nnew filtering\n'); end
+
+pot_type = 'd';
+niter = engine.max_iter;
+
+% msg(i1,t1,i2,j2) (i1,t1) -> (i2,t2)
+%lambda_msg = cell(ss,T,ss,T);
+%pi_msg = cell(ss,T,ss,T);
+
+% intra_lambda_msg(i,j,t) (i,t) -> (j,t), i is child
+% inter_lambda_msg(i,j,t) (i,t+1) -> (j,t), i is child
+% inter_pi_msg(i,j,t) (i,t-1) -> (j,t), i is parent
+intra_lambda_msg = cell(ss,ss,T);
+inter_lambda_msg = cell(ss,ss,T);
+inter_pi_msg = cell(ss,ss,T);
+
+lambda = cell(ss,T);
+pi = cell(ss,T);
+
+for t=1:T
+ for i=1:ss
+ lambda{i,t} = ones(ns(i), 1);
+ pi{i,t} = ones(ns(i), 1);
+
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ intra_lambda_msg{c,i,t} = ones(ns(i),1);
+ end
+
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ inter_lambda_msg{c,i,t} = ones(ns(i),1);
+ end
+
+ ps = parents(bnet.inter, i);
+ for p=ps(:)'
+ inter_pi_msg{p,i,t} = ones(ns(i), 1); % not used for t==1
+ end
+ end
+end
+
+% each hidden node absorbs lambda from its observed child (if any)
+for t=1:T
+ for i=hnodes
+ c = engine.obschild(i);
+ if c > 0
+ if t==1
+ fam = family(bnet.dag, c);
+ e = bnet.equiv_class(c, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ else
+ fam = family(bnet.dag, c, 2); % within 2 slice network
+ e = bnet.equiv_class(c, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ end
+ temp = pot_to_marginal(CPDpot);
+ lam_msg = normalise(temp.T);
+ %if verbose, fprintf('(%d,%d) sends lambda to (%d,%d)\n', c,t, i,t); disp(lam_msg); end
+ intra_lambda_msg{c,i,t} = lam_msg;
+ end
+ end
+end
+
+% FORWARD
+for t=1:T
+ % update pi
+ for i=hnodes
+ if t==1
+ e = bnet.equiv_class(i,1);
+ temp = struct(bnet.CPD{e});
+ pi{i,t} = temp.CPT;
+ else
+ e = bnet.equiv_class(i,2);
+ temp = struct(bnet.CPD{e});
+ ps = parents(bnet.inter, i);
+ dom = [ps i+ss];
+ pot = dpot(dom, ns(dom), temp.CPT);
+ for p=ps(:)'
+ temp = dpot(p, ns(p), inter_pi_msg{p,i,t});
+ pot = multiply_by_pot(pot, temp);
+ end
+ pot = marginalize_pot(pot, i+ss);
+ temp = pot_to_marginal(pot);
+ pi{i,t} = temp.T;
+ %if verbose, fprintf('(%d,%d) computes pi\n', i,t); disp(pi{i,t}); end
+ end
+
+ c = engine.obschild(i);
+ if c > 0
+ pi{i,t} = normalise(pi{i,t} .* intra_lambda_msg{c,i,t});
+ end
+ %if verbose, fprintf('(%d,%d) recomputes pi\n', i,t); disp(pi{i,t}); end
+ if verbose, fprintf('%d recomputes pi\n', i+(t-1)*ss); disp(pi{i,t}); end
+ end
+
+ % send pi msg to children
+ for i=hnodes
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ pot = pi{i,t};
+ for k=cs(:)'
+ if k ~= c
+ pot = pot .* inter_lambda_msg{k,i,t};
+ end
+ end
+ cs2 = children(bnet.intra, i);
+ for k=cs2(:)'
+ pot = pot .* intra_lambda_msg{k,i,t};
+ end
+ pot = normalise(pot);
+ %if verbose, fprintf('(%d,%d) sends pi to (%d,%d)\n', i,t, c,t+1); disp(pot); end
+ if verbose, fprintf('%d sends pi to %d\n', i+(t-1)*ss, c+t*ss); disp(pot); end
+ inter_pi_msg{i,c,t+1} = pot;
+ end
+ end
+end
+
+
+marginal = cell(ss,T);
+for t=1:T
+ for i=hnodes
+ %marginal{i,t} = normalise(pi{i,t} .* lambda{i,t});
+ marginal{i,t} = normalise(pi{i,t});
+ end
+end
+
+loglik = 0;
+
+msg.inter_pi_msg = inter_pi_msg;
+msg.inter_lambda_msg = inter_lambda_msg;
+msg.intra_lambda_msg = intra_lambda_msg;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence_obj_oriented.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence_obj_oriented.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,158 @@
+function [marginal, msg, loglik] = filter_evidence_old(engine, evidence)
+% [marginal, msg, loglik] = filter_evidence(engine, evidence) (pearl_dbn)
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+bnet2 = dbn_to_bnet(bnet, T);
+ns = bnet2.node_sizes;
+hnodes = mysetdiff(1:ss, engine.onodes);
+hnodes = hnodes(:)';
+
+[engine.parent_index, engine.child_index] = mk_pearl_msg_indices(bnet2);
+
+msg = init_msgs(bnet2.dag, ns, evidence);
+msg = init_ev_msgs(engine, evidence, msg);
+
+verbose = 1;
+if verbose, fprintf('\nold filtering\n'); end
+
+for t=1:T
+ % update pi
+ for i=hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ if t==1
+ e = bnet.equiv_class(i,1);
+ else
+ e = bnet.equiv_class(i,2);
+ end
+ msg{n}.pi = compute_pi(bnet.CPD{e}, n, ps, msg);
+ %if verbose, fprintf('%d computes pi\n', n); disp(msg{n}.pi); end
+ msg{n}.pi = normalise(msg{n}.pi(:) .* msg{n}.lambda_from_self(:));
+ if verbose, fprintf('%d recomputes pi\n', n); disp(msg{n}.pi); end
+ end
+ % send pi msg to children
+ for i=hnodes
+ n = i + (t-1)*ss;
+ cs = children(bnet2.dag, n);
+ for c=cs(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ pi_msg = normalise(compute_pi_msg(n, cs, msg, c, ns));
+ msg{c}.pi_from_parent{j} = pi_msg;
+ if verbose, fprintf('%d sends pi to %d\n', n,c); disp(pi_msg); end
+ end
+ end
+end
+
+
+marginal = cell(ss,T);
+lik = zeros(1,ss*T);
+for t=1:T
+ for i=1:ss
+ n = i + (t-1)*ss;
+ %[bel, lik(n)] = normalise(msg{n}.pi .* msg{n}.lambda);
+ [bel, lik(n)] = normalise(msg{n}.pi);
+ marginal{i,t} = bel;
+ end
+end
+
+loglik = sum(log(lik));
+
+
+
+%%%%%%%
+
+function lambda = compute_lambda(n, cs, msg, ns)
+% Pearl p183 eq 4.50
+lambda = prod_lambda_msgs(n, cs, msg, ns);
+
+%%%%%%%
+
+function pi_msg = compute_pi_msg(n, cs, msg, c, ns)
+% Pearl p183 eq 4.53 and 4.51
+pi_msg = msg{n}.pi .* prod_lambda_msgs(n, cs, msg, ns, c);
+
+%%%%%%%%%
+
+function lam = prod_lambda_msgs(n, cs, msg, ns, except)
+
+if nargin < 5, except = -1; end
+
+%lam = msg{n}.lambda_from_self(:);
+lam = ones(ns(n), 1);
+for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ lam = lam .* msg{n}.lambda_from_child{i};
+ end
+end
+
+
+%%%%%%%%%%%
+
+function msg = init_msgs(dag, ns, evidence)
+% INIT_MSGS Initialize the lambda/pi message and state vectors (pearl_dbn)
+% msg = init_msgs(dag, ns, evidence)
+%
+% We assume all the hidden nodes are discrete.
+
+N = length(dag);
+msg = cell(1,N);
+observed = ~isemptycell(evidence(:));
+
+for n=1:N
+ ps = parents(dag, n);
+ msg{n}.pi_from_parent = cell(1, length(ps));
+ for i=1:length(ps)
+ p = ps(i);
+ msg{n}.pi_from_parent{i} = ones(ns(p), 1);
+ end
+
+ cs = children(dag, n);
+ msg{n}.lambda_from_child = cell(1, length(cs));
+ for i=1:length(cs)
+ c = cs(i);
+ msg{n}.lambda_from_child{i} = ones(ns(n), 1);
+ end
+
+ msg{n}.lambda = ones(ns(n), 1);
+ msg{n}.pi = ones(ns(n), 1);
+
+ msg{n}.lambda_from_self = ones(ns(n), 1);
+end
+
+
+%%%%%%%%%
+
+function msg = init_ev_msgs(engine, evidence, msg)
+% Initialize the lambdas with any evidence
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+pot_type = 'd';
+t = 1;
+hnodes = mysetdiff(1:ss, engine.onodes);
+for i=hnodes(:)'
+ c = engine.obschild(i);
+ if c > 0
+ fam = family(bnet.dag, c);
+ e = bnet.equiv_class(c, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ temp = pot_to_marginal(CPDpot);
+ n = i;
+ msg{n}.lambda_from_self = temp.T;
+ end
+end
+for t=2:T
+ for i=hnodes(:)'
+ c = engine.obschild(i);
+ if c > 0
+ fam = family(bnet.dag, c, 2);
+ e = bnet.equiv_class(c, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ temp = pot_to_marginal(CPDpot);
+ n = i + (t-1)*ss;
+ msg{n}.lambda_from_self = temp.T;
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+function [marginal, msg, loglik] = smooth_evidence(engine, evidence)
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+onodes = engine.onodes;
+hnodes = mysetdiff(1:ss, onodes);
+hnodes = hnodes(:)';
+
+ns = bnet.node_sizes(:);
+onodes2 = [onodes(:); onodes(:)+ss];
+ns(onodes2) = 1;
+
+verbose = 0;
+pot_type = 'd';
+niter = engine.max_iter;
+
+if verbose, fprintf('new smooth\n'); end
+
+% msg(i1,t1,i2,j2) (i1,t1) -> (i2,t2)
+%lambda_msg = cell(ss,T,ss,T);
+%pi_msg = cell(ss,T,ss,T);
+
+% intra_lambda_msg(i,j,t) (i,t) -> (j,t), i is child
+% inter_lambda_msg(i,j,t) (i,t+1) -> (j,t), i is child
+% inter_pi_msg(i,j,t) (i,t-1) -> (j,t), i is parent
+intra_lambda_msg = cell(ss,ss,T);
+inter_lambda_msg = cell(ss,ss,T);
+inter_pi_msg = cell(ss,ss,T);
+
+lambda = cell(ss,T);
+pi = cell(ss,T);
+
+for t=1:T
+ for i=1:ss
+ lambda{i,t} = ones(ns(i), 1);
+ pi{i,t} = ones(ns(i), 1);
+
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ intra_lambda_msg{c,i,t} = ones(ns(i),1);
+ end
+
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ inter_lambda_msg{c,i,t} = ones(ns(i),1);
+ end
+
+ ps = parents(bnet.inter, i);
+ for p=ps(:)'
+ inter_pi_msg{p,i,t} = ones(ns(i), 1); % not used for t==1
+ end
+ end
+end
+
+
+% each hidden node absorbs lambda from its observed child (if any)
+for t=1:T
+ for i=hnodes
+ c = engine.obschild(i);
+ if c > 0
+ if t==1
+ fam = family(bnet.dag, c);
+ e = bnet.equiv_class(c, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ else
+ fam = family(bnet.dag, c, 2); % within 2 slice network
+ e = bnet.equiv_class(c, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ end
+ temp = pot_to_marginal(CPDpot);
+ lam_msg = normalise(temp.T);
+ intra_lambda_msg{c,i,t} = lam_msg;
+ end
+ end
+end
+
+for iter=1:engine.max_iter
+ % FORWARD
+ for t=1:T
+ % update pi
+ for i=hnodes
+ if t==1
+ e = bnet.equiv_class(i,1);
+ CPD = struct(bnet.CPD{e});
+ pi{i,t} = CPD.CPT;
+ else
+ e = bnet.equiv_class(i,2);
+ CPD = struct(bnet.CPD{e});
+ ps = parents(bnet.inter, i);
+ dom = [ps i+ss];
+ pot = dpot(dom, ns(dom), CPD.CPT);
+ for p=ps(:)'
+ temp = dpot(p, ns(p), inter_pi_msg{p,i,t});
+ pot = multiply_by_pot(pot, temp);
+ end
+ pot = marginalize_pot(pot, i+ss);
+ temp = pot_to_marginal(pot);
+ pi{i,t} = temp.T;
+ end
+ if verbose, fprintf('%d updates pi\n', i+(t-1)*ss); disp(pi{i,t}); end
+ end
+
+ % send pi msg to children
+ for i=hnodes
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ pot = pi{i,t};
+ for k=cs(:)'
+ if k ~= c
+ pot = pot .* inter_lambda_msg{k,i,t};
+ end
+ end
+ cs2 = children(bnet.intra, i);
+ for k=cs2(:)'
+ pot = pot .* intra_lambda_msg{k,i,t};
+ end
+ inter_pi_msg{i,c,t+1} = normalise(pot);
+ if verbose, fprintf('%d sends pi to %d\n', i+(t-1)*ss, c+t*ss); disp(inter_pi_msg{i,c,t+1}); end
+ end
+ end
+ end
+
+ if verbose, fprintf('backwards\n'); end
+ % BACKWARD
+ for t=T:-1:1
+ % update lambda
+ for i=hnodes
+ pot = ones(ns(i), 1);
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ pot = pot .* inter_lambda_msg{c,i,t};
+ end
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ pot = pot .* intra_lambda_msg{c,i,t};
+ end
+ lambda{i,t} = normalise(pot);
+ if verbose, fprintf('%d computes lambda\n', i+(t-1)*ss); disp(lambda{i,t}); end
+ end
+
+ % send lambda msgs to hidden parents in prev slice
+ for i=hnodes
+ ps = parents(bnet.inter, i);
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ CPD = struct(bnet.CPD{e});
+ fam = [ps i+ss];
+ for p=ps(:)'
+ pot = dpot(fam, ns(fam), CPD.CPT);
+ temp = dpot(i+ss, ns(i), lambda{i,t});
+ pot = multiply_by_pot(pot, temp);
+ for k=ps(:)'
+ if k ~= p
+ temp = dpot(k, ns(k), inter_pi_msg{k,i,t});
+ pot = multiply_by_pot(pot, temp);
+ end
+ end
+ pot = marginalize_pot(pot, p);
+ temp = pot_to_marginal(pot);
+ inter_lambda_msg{i,p,t-1} = normalise(temp.T);
+ if verbose, fprintf('%d sends lambda to %d\n', i+(t-1)*ss, p+(t-2)*ss); disp(inter_lambda_msg{i,p,t-1}); end
+ end
+ end
+ end
+ end
+end
+
+
+
+marginal = cell(ss,T);
+for t=1:T
+ for i=hnodes
+ marginal{i,t} = normalise(pi{i,t} .* lambda{i,t});
+ end
+end
+
+loglik = 0;
+
+msg.inter_pi_msg = inter_pi_msg;
+msg.inter_lambda_msg = inter_lambda_msg;
+msg.intra_lambda_msg = intra_lambda_msg;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence_fast.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence_fast.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,179 @@
+function [marginal, msg, loglik] = smooth_evidence_fast(engine, evidence)
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+onodes = engine.onodes;
+hnodes = mysetdiff(1:ss, onodes);
+hnodes = hnodes(:)';
+
+ns = bnet.node_sizes(:);
+onodes2 = [onodes(:); onodes(:)+ss];
+ns(onodes2) = 1;
+
+verbose = 0;
+pot_type = 'd';
+niter = engine.max_iter;
+
+if verbose, fprintf('new smooth\n'); end
+
+% msg(i1,t1,i2,j2) (i1,t1) -> (i2,t2)
+%lambda_msg = cell(ss,T,ss,T);
+%pi_msg = cell(ss,T,ss,T);
+
+% intra_lambda_msg(i,j,t) (i,t) -> (j,t), i is child
+% inter_lambda_msg(i,j,t) (i,t+1) -> (j,t), i is child
+% inter_pi_msg(i,j,t) (i,t-1) -> (j,t), i is parent
+intra_lambda_msg = cell(ss,ss,T);
+inter_lambda_msg = cell(ss,ss,T);
+inter_pi_msg = cell(ss,ss,T);
+
+lambda = cell(ss,T);
+pi = cell(ss,T);
+
+for t=1:T
+ for i=1:ss
+ lambda{i,t} = ones(ns(i), 1);
+ pi{i,t} = ones(ns(i), 1);
+
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ intra_lambda_msg{c,i,t} = ones(ns(i),1);
+ end
+
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ inter_lambda_msg{c,i,t} = ones(ns(i),1);
+ end
+
+ ps = parents(bnet.inter, i);
+ for p=ps(:)'
+ inter_pi_msg{p,i,t} = ones(ns(i), 1); % not used for t==1
+ end
+ end
+end
+
+
+% each hidden node absorbs lambda from its observed child (if any)
+for t=1:T
+ for i=hnodes
+ c = engine.obschild(i);
+ if c > 0
+ if t==1
+ fam = family(bnet.dag, c);
+ e = bnet.equiv_class(c, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ else
+ fam = family(bnet.dag, c, 2); % within 2 slice network
+ e = bnet.equiv_class(c, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ end
+ temp = pot_to_marginal(CPDpot);
+ lam_msg = normalise(temp.T);
+ intra_lambda_msg{c,i,t} = lam_msg;
+ end
+ end
+end
+
+for iter=1:engine.max_iter
+ % FORWARD
+ for t=1:T
+ % update pi
+ for i=hnodes
+ if t==1
+ e = bnet.equiv_class(i,1);
+ temp = struct(bnet.CPD{e});
+ pi{i,t} = temp.CPT;
+ else
+ e = bnet.equiv_class(i,2);
+ CPD = struct(bnet.CPD{e});
+ ps = parents(bnet.inter, i);
+ temp = CPD.CPT;
+ for p=ps(:)'
+ temp(:) = temp(:) .* inter_pi_msg{p,i,t}(engine.mult_parent_ndx{i,p});
+ end
+ dom = [ps i+ss];
+ pot = dpot(dom, ns(dom), temp);
+ pot = marginalize_pot(pot, i+ss);
+ temp = pot_to_marginal(pot);
+ pi{i,t} = temp.T;
+ end
+ if verbose, fprintf('%d updates pi\n', i+(t-1)*ss); disp(pi{i,t}); end
+ end
+
+ % send pi msg to children
+ for i=hnodes
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ pot = pi{i,t};
+ for k=cs(:)'
+ if k ~= c
+ pot = pot .* inter_lambda_msg{k,i,t};
+ end
+ end
+ cs2 = children(bnet.intra, i);
+ for k=cs2(:)'
+ pot = pot .* intra_lambda_msg{k,i,t};
+ end
+ inter_pi_msg{i,c,t+1} = normalise(pot);
+ if verbose, fprintf('%d sends pi to %d\n', i+(t-1)*ss, c+t*ss); disp(inter_pi_msg{i,c,t+1}); end
+ end
+ end
+ end
+
+ if verbose, fprintf('backwards\n'); end
+ % BACKWARD
+ for t=T:-1:1
+ % update lambda
+ for i=hnodes
+ pot = ones(ns(i), 1);
+ cs = children(bnet.inter, i);
+ for c=cs(:)'
+ pot = pot .* inter_lambda_msg{c,i,t};
+ end
+ cs = children(bnet.intra, i);
+ for c=cs(:)'
+ pot = pot .* intra_lambda_msg{c,i,t};
+ end
+ lambda{i,t} = normalise(pot);
+ if verbose, fprintf('%d computes lambda\n', i+(t-1)*ss); disp(lambda{i,t}); end
+ end
+
+ % send lambda msgs to hidden parents in prev slice
+ for i=hnodes
+ ps = parents(bnet.inter, i);
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ CPD = struct(bnet.CPD{e});
+ for p=ps(:)'
+ temp = CPD.CPT(:) .* lambda{i,t}(engine.mult_self_ndx{i});
+ for k=ps(:)'
+ if k ~= p
+ temp(:) = temp(:) .* inter_pi_msg{k,i,t}(engine.mult_parent_ndx{i,k});
+ end
+ end
+ fam = [ps i+ss];
+ pot = dpot(fam, ns(fam), temp);
+ pot = marginalize_pot(pot, p);
+ temp = pot_to_marginal(pot);
+ inter_lambda_msg{i,p,t-1} = normalise(temp.T);
+ if verbose, fprintf('%d sends lambda to %d\n', i+(t-1)*ss, p+(t-2)*ss); disp(inter_lambda_msg{i,p,t-1}); end
+ end
+ end
+ end
+ end
+end
+
+
+
+marginal = cell(ss,T);
+for t=1:T
+ for i=hnodes
+ marginal{i,t} = normalise(pi{i,t} .* lambda{i,t});
+ end
+end
+
+loglik = 0;
+
+msg.inter_pi_msg = inter_pi_msg;
+msg.inter_lambda_msg = inter_lambda_msg;
+msg.intra_lambda_msg = intra_lambda_msg;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/wrong_smooth.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/Old/wrong_smooth.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,210 @@
+function [marginal, msg, loglik] = smooth_evidence(engine, evidence)
+% [marginal, msg, loglik] = smooth_evidence(engine, evidence) (pearl_dbn)
+
+disp('warning: pearl_dbn smoothing is broken');
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+bnet2 = dbn_to_bnet(bnet, T);
+ns = bnet2.node_sizes;
+hnodes = mysetdiff(1:ss, engine.onodes);
+hnodes = hnodes(:)';
+
+onodes2 = unroll_set(engine.onodes(:), ss, T);
+onodes2 = onodes2(:)';
+
+hnodes2 = unroll_set(hnodes(:), ss, T);
+hnodes2 = hnodes2(:)';
+
+[engine.parent_index, engine.child_index] = mk_pearl_msg_indices(bnet2);
+
+msg = init_msgs(bnet2.dag, ns, evidence, bnet2.equiv_class, bnet2.CPD);
+
+verbose = 0;
+pot_type = 'd';
+niter = 1;
+for iter=1:niter
+ % FORWARD
+ for t=1:T
+ if verbose, fprintf('t=%d\n', t); end
+
+ % each hidden node absorbs lambda from its observed child (if any)
+ for i=hnodes
+ c = engine.obschild(i);
+ if c > 0
+ if t==1
+ fam = family(bnet.dag, c);
+ e = bnet.equiv_class(c, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ else
+ fam = family(bnet.dag, c, 2); % family of c within 2 slice network (node arg was missing)
+ e = bnet.equiv_class(c, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ end
+ temp = pot_to_marginal(CPDpot);
+ n = i + (t-1)*ss;
+ lam_msg = normalise(temp.T);
+ j = engine.child_index{n}(c+(t-1)*ss);
+ assert(j==1);
+ msg{n}.lambda_from_child{j} = lam_msg;
+ if verbose, fprintf('%d sends lambda to %d\n', c + (t-1)*ss, n); disp(lam_msg); end
+ end
+ end
+
+ % update pi
+ for i=hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ if t==1
+ e = bnet.equiv_class(i,1);
+ else
+ e = bnet.equiv_class(i,2);
+ end
+ msg{n}.pi = compute_pi(bnet.CPD{e}, n, ps, msg);
+ if verbose, fprintf('%d computes pi\n', n); disp(msg{n}.pi); end
+ end
+
+ % send pi msg to children in next slice
+ for i=hnodes
+ n = i + (t-1)*ss;
+ %cs = myintersect(children(bnet2.dag, n), hnodes2);
+ cs = children(bnet2.dag, n);
+ for c=cs(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ pi_msg = normalise(compute_pi_msg(n, cs, msg, c, ns));
+ msg{c}.pi_from_parent{j} = pi_msg;
+ if verbose, fprintf('%d sends pi to %d\n', n, c); disp(pi_msg); end
+ end
+ end
+ end
+
+ % BACKWARD
+ for t=T:-1:1
+ if verbose, fprintf('t = %d\n', t); end
+
+ % update lambda
+ for i=hnodes
+ n = i + (t-1)*ss;
+ cs = children(bnet2.dag, n);
+ msg{n}.lambda = compute_lambda(n, cs, msg, ns);
+ if verbose, fprintf('%d computes lambda\n', n); disp(msg{n}.lambda); end
+ end
+
+ % send lambda msgs to hidden parents in prev slice
+ for i=hnodes
+ n = i + (t-1)*ss;
+ %ps = myintersect(parents(bnet2.dag, n), hnodes2);
+ ps = parents(bnet2.dag, n);
+ for p=ps(:)'
+ j = engine.child_index{p}(n); % n is p's j'th child
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ else
+ e = bnet.equiv_class(i, 1);
+ end
+ lam_msg = normalise(compute_lambda_msg(bnet.CPD{e}, n, ps, msg, p));
+ msg{p}.lambda_from_child{j} = lam_msg;
+ if verbose, fprintf('%d sends lambda to %d\n', n, p); disp(lam_msg); end
+ end
+ end
+
+ % send pi msg to observed children
+ if 0
+ for i=hnodes
+ n = i + (t-1)*ss;
+ cs = myintersect(children(bnet2.dag, n), onodes2);
+ %cs = children(bnet2.dag, n);
+ for c=cs(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ pi_msg = normalise(compute_pi_msg(n, cs, msg, c, ns));
+ msg{c}.pi_from_parent{j} = pi_msg;
+ if verbose, fprintf('%d sends pi to %d\n', n, c); disp(pi_msg); end
+ end
+ end
+ end
+
+ end
+end
+
+
+marginal = cell(ss,T);
+lik = zeros(1,ss*T);
+for t=1:T
+ for i=hnodes
+ n = i + (t-1)*ss;
+ [bel, lik(n)] = normalise(msg{n}.pi .* msg{n}.lambda);
+ marginal{i,t} = bel;
+ end
+end
+
+loglik = 0;
+%loglik = sum(log(lik));
+
+
+
+%%%%%%%
+
+function lambda = compute_lambda(n, cs, msg, ns)
+% Pearl p183 eq 4.50
+lambda = prod_lambda_msgs(n, cs, msg, ns);
+
+%%%%%%%
+
+function pi_msg = compute_pi_msg(n, cs, msg, c, ns)
+% Pearl p183 eq 4.53 and 4.51
+pi_msg = msg{n}.pi .* prod_lambda_msgs(n, cs, msg, ns, c);
+
+%%%%%%%%%
+
+function lam = prod_lambda_msgs(n, cs, msg, ns, except)
+
+if nargin < 5, except = -1; end
+
+%lam = msg{n}.lambda_from_self(:);
+lam = ones(ns(n), 1);
+for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ lam = lam .* msg{n}.lambda_from_child{i};
+ end
+end
+
+
+%%%%%%%%%
+
+function msg = init_msgs(dag, ns, evidence, eclass, CPD)
+% INIT_MSGS Initialize the lambda/pi message and state vectors (pearl_dbn)
+% msg = init_msgs(dag, ns, evidence)
+
+N = length(dag);
+msg = cell(1,N);
+observed = ~isemptycell(evidence(:));
+
+for n=1:N
+ ps = parents(dag, n);
+ msg{n}.pi_from_parent = cell(1, length(ps));
+ for i=1:length(ps)
+ p = ps(i);
+ msg{n}.pi_from_parent{i} = ones(ns(p), 1);
+ end
+
+ cs = children(dag, n);
+ msg{n}.lambda_from_child = cell(1, length(cs));
+ for i=1:length(cs)
+ c = cs(i);
+ msg{n}.lambda_from_child{i} = ones(ns(n), 1);
+ end
+
+ msg{n}.lambda = ones(ns(n), 1);
+ msg{n}.pi = ones(ns(n), 1);
+
+ % Initialize the lambdas with any evidence
+ if observed(n)
+ v = evidence{n};
+ msg{n}.lambda = zeros(ns(n), 1);
+ msg{n}.lambda(v) = 1; % delta function
+ msg{n}.lambda = [];
+ end
+
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (loopy_dbn)
+% [engine, loglik] = enter_evidence(engine, evidence, ....)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (not yet supported), else sum-product [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+assert(~maximize);
+assert(~filter);
+
+[engine.marginal, engine.msg, loglik] = enter_soft_ev(engine, evidence);
+engine.evidence = evidence; % needed by marginal_nodes and marginal_family
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/enter_soft_ev.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/enter_soft_ev.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,137 @@
+function [marginal, msg, loglik] = enter_soft_ev(engine, evidence)
+% [marginal, msg, loglik] = enter_soft_ev(engine, evidence) (pearl_dbn)
+
+
+[ss T] = size(evidence);
+bnet = bnet_from_engine(engine);
+bnet2 = dbn_to_bnet(bnet, T);
+ns = bnet2.node_sizes;
+hnodes = mysetdiff(1:ss, engine.onodes);
+hnodes = hnodes(:)';
+
+onodes2 = unroll_set(engine.onodes(:), ss, T);
+onodes2 = onodes2(:)';
+
+hnodes2 = unroll_set(hnodes(:), ss, T);
+hnodes2 = hnodes2(:)';
+
+[engine.parent_index, engine.child_index] = mk_pearl_msg_indices(bnet2);
+
+rand_init = 0;
+use_ev = 0;
+msg = init_pearl_msgs(bnet2.dag, ns, evidence, rand_init, use_ev);
+msg = init_pearl_dbn_ev_msgs(bnet, evidence, engine);
+
+verbose = 0;
+pot_type = 'd';
+niter = engine.max_iter;
+
+if verbose, fprintf('old smooth\n'); end
+
+for iter=1:niter
+ % FORWARD
+ for t=1:T
+ if verbose, fprintf('t=%d\n', t); end
+
+ % update pi
+ for i=hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ if t==1
+ e = bnet.equiv_class(i,1);
+ else
+ e = bnet.equiv_class(i,2);
+ end
+ msg{n}.pi = compute_pi(bnet.CPD{e}, n, ps, msg);
+ if verbose, fprintf('%d computes pi\n', n); disp(msg{n}.pi); end
+ end
+
+ % send pi msg to children
+ for i=hnodes
+ n = i + (t-1)*ss;
+ %cs = myintersect(children(bnet2.dag, n), hnodes2);
+ cs = children(bnet2.dag, n); % must use all children to get index right
+ for c=cs(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ pi_msg = normalise(compute_pi_msg(n, cs, msg, c, ns));
+ msg{c}.pi_from_parent{j} = pi_msg;
+ if verbose, fprintf('%d sends pi to %d\n', n, c); disp(pi_msg); end
+ end
+ end
+ end
+
+ % BACKWARD
+ for t=T:-1:1
+ if verbose, fprintf('t = %d\n', t); end
+
+ % update lambda
+ for i=hnodes
+ n = i + (t-1)*ss;
+ cs = children(bnet2.dag, n);
+ msg{n}.lambda = compute_lambda(n, cs, msg, ns);
+ if verbose, fprintf('%d computes lambda\n', n); disp(msg{n}.lambda); end
+ end
+
+ % send lambda msgs to hidden parents in prev slice
+ for i=hnodes
+ n = i + (t-1)*ss;
+ ps = parents(bnet2.dag, n);
+ for p=ps(:)'
+ j = engine.child_index{p}(n); % n is p's j'th child
+ if t > 1
+ e = bnet.equiv_class(i, 2);
+ else
+ e = bnet.equiv_class(i, 1);
+ end
+ lam_msg = normalise(compute_lambda_msg(bnet.CPD{e}, n, ps, msg, p));
+ msg{p}.lambda_from_child{j} = lam_msg;
+ if verbose, fprintf('%d sends lambda to %d\n', n, p); disp(lam_msg); end
+ end
+ end
+
+ end
+end
+
+
+marginal = cell(ss,T);
+lik = zeros(1,ss*T);
+for t=1:T
+ for i=hnodes
+ n = i + (t-1)*ss;
+ [bel, lik(n)] = normalise(msg{n}.pi .* msg{n}.lambda);
+ marginal{i,t} = bel;
+ end
+end
+
+loglik = 0;
+%loglik = sum(log(lik));
+
+
+
+%%%%%%%
+
+function lambda = compute_lambda(n, cs, msg, ns)
+% Pearl p183 eq 4.50
+lambda = prod_lambda_msgs(n, cs, msg, ns);
+
+%%%%%%%
+
+function pi_msg = compute_pi_msg(n, cs, msg, c, ns)
+% Pearl p183 eq 4.53 and 4.51
+pi_msg = msg{n}.pi .* prod_lambda_msgs(n, cs, msg, ns, c);
+
+%%%%%%%%%
+
+function lam = prod_lambda_msgs(n, cs, msg, ns, except)
+
+if nargin < 5, except = -1; end
+
+%lam = msg{n}.lambda_from_self(:);
+lam = ones(ns(n), 1);
+for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ lam = lam .* msg{n}.lambda_from_child{i};
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function marginal = marginal_nodes(engine, nodes, t)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (pearl_dbn)
+% marginal = marginal_nodes(engine, i, t)
+% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
+% If enter_evidence used filtering instead of smoothing, this will return Pr(X(i,t) | Y(1:t)).
+
+if nargin < 3, t = 1; end
+assert(length(nodes)==1);
+i = nodes(end);
+if ~myismember(i, engine.onodes)
+ marginal.T = engine.marginal{i,t};
+else
+ marginal.T = 1; % observed
+end
+
+% we convert the domain to the unrolled numbering system
+% so that update_ess extracts the right evidence.
+marginal.domain = nodes+(t-1)*engine.ss;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/pearl_dbn_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/pearl_dbn_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function engine = pearl_dbn_inf_engine(bnet, varargin)
+% PEARL_DBN_INF_ENGINE Loopy Pearl version of forwards-backwards
+% engine = pearl_dbn_inf_engine(bnet, ...)
+%
+% Optional arguments
+% 'max_iter' - specifies the max num. forward-backward passes to perform [1]
+% 'tol' - as in loopy_pearl [1e-3]
+% 'momentum' - as in loopy_pearl [0]
+
+error('pearl_dbn does not work yet')
+
+max_iter = 1;
+tol = 1e-3;
+momentum = 0;
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'max_iter', max_iter = args{i+1};
+ case 'tol', tol = args{i+1};
+ case 'momentum', momentum = args{i+1};
+ end
+ end
+end
+
+
+engine.max_iter = max_iter;
+engine.tol = tol;
+engine.momentum = momentum;
+engine.pearl_engine = [];
+engine.T = [];
+engine.ss = length(bnet.intra);
+
+engine.marginal = [];
+engine.evidence = [];
+engine.msg = [];
+engine.parent_index = [];
+engine.child_index = [];
+%[engine.parent_index, engine.child_index] = mk_pearl_msg_indices(bnet); % need to unroll first
+
+ss = length(bnet.intra);
+engine.ss = ss; % was 'engines.ss', which assigned to a stray variable
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+obschild = zeros(1,ss);
+for i=hnodes(:)'
+ %ocs = myintersect(children(bnet.dag, i), onodes);
+ ocs = children(bnet.intra, i);
+ assert(length(ocs) <= 1);
+ if length(ocs)==1
+ obschild(i) = ocs(1);
+ end
+end
+engine.obschild = obschild;
+
+engine.mult_self_ndx = [];
+engine.mult_parent_ndx = [];
+engine.marg_self_ndx = [];
+engine.marg_parent_ndx = [];
+
+
+engine = class(engine, 'pearl_dbn_inf_engine', inf_engine(bnet)); % class name must match the @pearl_dbn_inf_engine directory
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/init_pearl_dbn_ev_msgs.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@pearl_dbn_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/init_pearl_dbn_ev_msgs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_dbn_inf_engine/private/init_pearl_dbn_ev_msgs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function msg = init_pearl_dbn_ev_msgs(bnet, evidence, engine)
+
+[ss T] = size(evidence);
+pot_type = 'd';
+
+% each hidden node absorbs lambda from its observed child (if any)
+for t=1:T
+ for i=hnodes
+ c = engine.obschild(i);
+ if c > 0
+ if t==1
+ fam = family(bnet.dag, c);
+ e = bnet.equiv_class(c, 1);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,1));
+ else
+ fam = family(bnet.dag, c, 2); % within 2 slice network
+ e = bnet.equiv_class(c, 2);
+ CPDpot = CPD_to_pot(pot_type, bnet.CPD{e}, fam, bnet.node_sizes(:), bnet.cnodes(:), evidence(:,t-1:t));
+ end
+ temp = pot_to_marginal(CPDpot);
+ n = i + (t-1)*ss;
+ lam_msg = normalise(temp.T);
+ j = engine.child_index{n}(c+(t-1)*ss);
+ assert(j==1);
+ msg{n}.lambda_from_child{j} = lam_msg;
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/pearl_unrolled_dbn_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function [engine, loglik, niter] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (loopy_unrolled_dbn)
+% [engine, loglik, niter] = enter_evidence(engine, evidence, ....)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product (not yet supported), else sum-product [0]
+% filename - as in loopy_pearl
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filename = engine.filename;
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filename', filename = args{i+1};
+ end
+ end
+end
+
+
+[ss T] = size(evidence);
+if T ~= engine.T
+ bnetT = dbn_to_bnet(bnet_from_engine(engine), T);
+ engine.unrolled_engine = pearl_inf_engine(bnetT, 'protocol', engine.protocol, ...
+ 'max_iter', engine.max_iter_per_slice * T, ...
+ 'tol', engine.tol, 'momentum', engine.momentum);
+ engine.T = T;
+end
+[engine.unrolled_engine, loglik, niter] = enter_evidence(engine.unrolled_engine, evidence(:), ...
+ 'maximize', maximize, 'filename', filename);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_unrolled_dbn)
+% marginal = marginal_family(engine, i, t)
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+assert(~add_ev);
+
+%marginal = marginal_family(engine.unrolled_engine, i + (t-1)*engine.ss, add_ev);
+marginal = marginal_family(engine.unrolled_engine, i + (t-1)*engine.ss);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (loopy_unrolled_dbn)
+% marginal = marginal_nodes(engine, nodes, t)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' must occur in some clique.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+marginal = marginal_nodes(engine.unrolled_engine, nodes + (t-1)*engine.ss, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/pearl_unrolled_dbn_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/pearl_unrolled_dbn_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function engine = pearl_unrolled_dbn_inf_engine(bnet, varargin)
+% LOOPY_DBN_INF_ENGINE Loopy Pearl version of forwards-backwards
+% engine = loopy_unrolld_dbn_inf_engine(bnet, ...)
+%
+% Optional arguments
+% 'max_iter' - specifies the max num. forward-backward passes to perform PER SLICE [2]
+% 'tol' - as in loopy_pearl [1e-3]
+% 'momentum' - as in loopy_pearl [0]
+% protocol - tree or parallel [parallel]
+% filename - as in pearl [ '' ]
+
+max_iter_per_slice = 2;
+tol = 1e-3;
+momentum = 0;
+protocol = 'parallel';
+filename = '';
+
+args = varargin;
+for i=1:2:length(args)
+ switch args{i},
+ case 'max_iter', max_iter_per_slice = args{i+1};
+ case 'tol', tol = args{i+1};
+ case 'momentum', momentum = args{i+1};
+ case 'protocol', protocol = args{i+1};
+ case 'filename', filename = args{i+1};
+ end
+end
+
+engine.filename = filename;
+engine.max_iter_per_slice = max_iter_per_slice;
+engine.tol = tol;
+engine.momentum = momentum;
+engine.unrolled_engine = [];
+engine.T = -1;
+engine.ss = length(bnet.intra);
+engine.protocol = protocol;
+
+engine = class(engine, 'pearl_unrolled_dbn_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@pearl_unrolled_dbn_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (pearl_unrolled_dbn)
+% engine = update_engine(engine, newCPDs)
+
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+engine.unrolled_engine = update_engine(engine.unrolled_engine, newCPDs);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/enter_evidence.m/1.1.1.1/Wed Feb 19 09:52:12 2003//
+/marginal_family.m/1.1.1.1/Wed Feb 19 09:52:12 2003//
+/marginal_nodes.m/1.1.1.1/Wed Feb 19 09:52:12 2003//
+/stable_ho_inf_engine.m/1.1.1.1/Fri Mar 14 09:45:34 2003//
+/test_ho_inf_enginge.m/1.1.1.1/Wed Feb 19 09:52:12 2003//
+/update_engine.m/1.1.1.1/Wed Feb 19 09:52:12 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic/@stable_ho_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree_unrolled_dbn)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+% filter - if 1, does filtering (not supported), else smoothing [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+maximize = 0;
+filter = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filter', filter = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+if filter
+ error('jtree_unrolled_dbn does not support filtering')
+end
+
+if size(evidence,2) ~= engine.nslices
+ error(['engine was created assuming there are ' num2str(engine.nslices) ...
+ ' slices, but evidence has ' num2str(size(evidence,2))])
+end
+
+[engine.unrolled_engine, loglik] = enter_evidence(engine.unrolled_engine, evidence, 'maximize', maximize);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_unrolled_dbn)
+% marginal = marginal_family(engine, i, t)
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+assert(~add_ev);
+
+%marginal = marginal_family(engine.unrolled_engine, i + (t-1)*engine.ss, add_ev);
+marginal = marginal_family(engine.unrolled_engine, i + (t-1)*engine.ss);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (loopy_unrolled_dbn)
+% marginal = marginal_nodes(engine, nodes, t)
+%
+% 't' specifies the time slice of the earliest node in 'nodes'.
+% 'nodes' must occur in some clique.
+%
+% Example:
+% Consider a DBN with 2 nodes per slice.
+% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3,
+% i.e., nodes 3 and 5 in the unrolled network,
+
+if nargin < 3, t = 1; end
+if nargin < 4, add_ev = 0; end
+
+marginal = marginal_nodes(engine.unrolled_engine, nodes + (t-1)*engine.ss, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/stable_ho_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/stable_ho_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function engine = dv_unrolled_dbn_inf_engine(bnet, T, varargin)
+% JTREE_UNROLLED_DBN_INF_ENGINE Unroll the DBN for T time-slices and apply jtree to the resulting static net
+% engine = jtree_unrolled_dbn_inf_engine(bnet, T, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% useC - 1 means use jtree_C_inf_engine instead of jtree_inf_engine [0]
+% constrained - 1 means we constrain ourselves to eliminate slice t before t+1 [1]
+%
+% e.g., engine = jtree_unrolled_inf_engine(bnet, 'useC', 1);
+
+% set default params
+N = length(bnet.intra);
+useC = 0;
+constrained = 1;
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ if isstr(args{1})
+ for i=1:2:nargs
+ switch args{i},
+ case 'useC', useC = args{i+1};
+ case 'constrained', constrained = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ else
+ error(['invalid argument name ' args{1}]);
+ end
+end
+
+bnet2 = hodbn_to_bnet(bnet, T);
+ss = length(bnet.intra);
+engine.ss = ss;
+
+% If constrained_order = 1 we constrain ourselves to eliminate slice t before t+1.
+% This prevents cliques containing nodes from far-apart time-slices.
+if constrained
+ stages = num2cell(unroll_set(1:ss, ss, T), 1);
+else
+ stages = { 1:length(bnet2.dag) };
+end
+if useC
+ %jengine = jtree_C_inf_engine(bnet2, 'stages', stages);
+ %function is not implemented
+ assert(0)
+else
+ jengine = stab_cond_gauss_inf_engine(bnet2);
+end
+
+engine.unrolled_engine = jengine;
+% we don't inherit from jtree_inf_engine, because that would only store bnet2,
+% and we would lose access to the DBN-specific fields like intra/inter
+
+engine.nslices = T;
+engine = class(engine, 'stable_ho_inf_engine', inf_engine(bnet));
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/test_ho_inf_enginge.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/test_ho_inf_enginge.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,87 @@
+function [engine,engine2] = test_ho_inf_enginge(order,T)
+
+assert(order >= 1)
+% Model a SISO system, i. e. all node are one-dimensional
+% The nodes are numbered as follows
+% u(t) = 1 input
+% y(t) = 2 model output
+% z(t) = 3 noise
+% q(t) = 4 observed output = noise + model output
+
+ns = [1 1 1 1];
+
+% Model a linear system, i.e. there are no discrete nodes
+dn = [];
+
+% Modeling of connections within a time slice
+intra = zeros(4);
+intra(2,4) = 1; % Connection y(t) -> q(t)
+intra(3,4) = 1; % Connection z(t) -> q(t)
+
+% Connections to the next time slice
+inter = zeros(4,4,order);
+inter(1,2,1) = 1; % u(t) -> y(t+1);
+inter(2,2,1) = 1; %y(t) -> y(t+1);
+inter(3,3,1) = 1; %z(t) -> z(t+1);
+
+if order >= 2
+ inter(1,2,2) = 1; % u(t) -> y(t+2);
+ inter(2,2,2) = 1; % y(t) -> y(t+2);
+end
+
+for i = 3: order
+ inter(:,:,i) = inter(:,:,i-1); %u(t) -> y(t+i) y(t) -> y(t) +i
+end;
+
+
+% Compution of a higer order Markov Model
+bnet = mk_higher_order_dbn(intra,inter,ns,'discrete',dn);
+bnet2 = mk_dbn(intra,inter(:,:,1),ns,'discrete',dn)
+
+
+%Calculation of the number of nodes with different parameters
+%There is one input and one output nodes 2
+%There are two different disturbance node 2
+%There are order +1 nodes for y 1 + order
+numOfNodes = 5 + order;
+
+% First input node
+bnet.CPD{1} = gaussian_CPD(bnet,1,'mean',0);
+bnet2.CPD{1} = gaussian_CPD(bnet,1,'mean',0);
+% Modeled output
+bnet.CPD{2} = gaussian_CPD(bnet,2,'mean',0);
+bnet2.CPD{2} = gaussian_CPD(bnet,2,'mean',0);
+%Disturbance
+bnet.CPD{3} = gaussian_CPD(bnet,3,'mean',0);
+bnet2.CPD{3} = gaussian_CPD(bnet,3,'mean',0);
+
+%Qutput
+bnet.CPD{4} = gaussian_CPD(bnet,4,'mean',0);
+bnet2.CPD{4} = gaussian_CPD(bnet,4,'mean',0);
+
+
+%Output node in the second time-slice
+%Remember that node number 6 is an example for
+%the fifth equivalence class
+bnet.CPD{5} = gaussian_CPD(bnet,6,'mean',0);
+bnet2.CPD{5} = gaussian_CPD(bnet,6,'mean',0);
+
+%Disturbance node in the second time slice
+bnet.CPD{6} = gaussian_CPD(bnet,7,'mean',0);
+bnet2.CPD{6} = gaussian_CPD(bnet,7,'mean',0);
+
+% Modeling of the remaining nodes for y
+for i = 7:numOfNodes
+ bnet.CPD{i} = gaussian_CPD(bnet,(i - 6)*4 + 7,'mean',0);
+end
+
+% Generation of the inference engine
+engine = dv_unrolled_dbn_inf_engine(bnet,T);
+engine2 = jtree_unrolled_dbn_inf_engine(bnet,T);
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/@stable_ho_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (jtree_unrolled_dbn)
+% engine = update_engine(engine, newCPDs)
+
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+engine.unrolled_engine = update_engine(engine.unrolled_engine, newCPDs);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/dummy/1.1.1.1/Sat Jan 18 22:22:28 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+A D/@bk_ff_hmm_inf_engine////
+A D/@bk_inf_engine////
+A D/@cbk_inf_engine////
+A D/@ff_inf_engine////
+A D/@frontier_inf_engine////
+A D/@hmm_inf_engine////
+A D/@jtree_dbn_inf_engine////
+A D/@jtree_unrolled_dbn_inf_engine////
+A D/@kalman_inf_engine////
+A D/@pearl_dbn_inf_engine////
+A D/@pearl_unrolled_dbn_inf_engine////
+A D/@stable_ho_inf_engine////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/dynamic
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/dynamic/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/bnet_from_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/filter_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@filter_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/bnet_from_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/bnet_from_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function bnet = bnet_from_engine(engine)
+% BNET_FROM_ENGINE Return the bnet structure stored inside the engine (smoother_engine)
+% bnet = bnet_from_engine(engine)
+
+bnet = bnet_from_engine(engine.tbn_engine);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function [engine, LL] = enter_evidence(engine, ev, t)
+% ENTER_EVIDENCE Call the online filter
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if if X(i) is hidden, and otherwise contains its observed value (scalar or column vector)
+
+engine.old_f = engine.f;
+if t==1
+ [engine.f, LL] = fwd1(engine.tbn_engine, ev, 1);
+else
+ [engine.f, LL] = fwd(engine.tbn_engine, engine.old_f, ev, t);
+end
+engine.b = backT(engine.tbn_engine, engine.f, t);
+engine.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/filter_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/filter_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function engine = filter_engine(tbn_engine)
+% FILTER_ENGINE Create an engine which does online filtering
+% function engine = filter_engine(tbn_engine)
+
+engine.tbn_engine = tbn_engine;
+engine.f = []; % space to store filtered message
+engine.old_f = [];
+engine.b = []; % space to store smoothed message
+engine.t = [];
+engine = class(engine, 'filter_engine');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the joint distribution on a set of family (filter_engine)
+% function marginal = marginal_family(engine, i, t, add_ev)
+
+if nargin < 4, add_ev = 0; end
+
+if t ~= engine.t
+ error('mixed up time stamps')
+end
+
+marginal = marginal_family(engine.tbn_engine, engine.b, i, t, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@filter_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the joint distribution on a set of nodes (filter_engine)
+% function marginal = marginal_nodes(engine, nodes, t, add_ev)
+
+if nargin < 4, add_ev = 0; end
+
+if t ~= engine.t
+ error('mixed up time stamps')
+end
+marginal = marginal_nodes(engine.tbn_engine, engine.b, nodes, t, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+/back.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/backT.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/fwd.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/fwd1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/hmm_2TBN_inf_engine.m/1.1.1.1/Thu Nov 14 20:03:50 2002//
+/marginal_family.m/1.1.1.1/Thu Nov 14 20:05:36 2002//
+/marginal_nodes.m/1.1.1.1/Thu Nov 14 20:02:46 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@hmm_2TBN_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/back.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/back.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function b = back(engine, bfuture, f, t)
+
+if f.t ~= t
+ error('mixed up time stamps')
+end
+
+b.t = t;
+b.obslik = f.obslik;
+bb_future = bfuture.beta .* bfuture.obslik;
+if engine.maximize
+ B = repmat(bb_future(:)', length(bfuture.beta), 1);
+ b.beta = normalise(max(engine.transprob .* B, [], 2));
+else
+ b.beta = normalise((engine.transprob * bb_future));
+end
+b.gamma = normalise(f.alpha .* b.beta);
+if t > 1
+ bb_t = b.beta .* b.obslik;
+ b.xi = normalise((engine.transprob .* (f.past_alpha * bb_t'))); % t-1,t
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/backT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/backT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function b = backT(engine, f, t)
+
+b.t = t;
+b.obslik = f.obslik;
+Q = length(f.alpha);
+b.beta = ones(Q,1);
+b.gamma = f.alpha;
+if t > 1
+ bb_t = b.obslik;
+ b.xi = normalise((engine.transprob .* (f.past_alpha * bb_t'))); % T-1,T
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/fwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/fwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function [f, logscale] = fwd(engine, fpast, ev, t)
+% Forwards pass.
+
+f.obslik = mk_hmm_obs_lik_vec(engine, ev);
+transmat = engine.transprob;
+f.past_alpha = fpast.alpha;
+if engine.maximize
+ Q = length(fpast.alpha);
+ A = repmat(fpast.alpha, [1 Q]);
+ m = max(transmat .* A, [], 1);
+ [f.alpha, scale] = normalise(m(:) .* f.obslik);
+else
+ [f.alpha, scale] = normalise((transmat' * fpast.alpha) .* f.obslik);
+end
+logscale = log(scale);
+%f.xi = normalise((fpast.alpha * obslik') .* transmat); % t-1,t
+f.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/fwd1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/fwd1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function [f, logscale] = fwd1(engine, ev, t)
+% Forwards pass for slice 1.
+
+if t ~= 1
+ error('mixed up time stamps')
+end
+prior = engine.startprob(:);
+f.obslik = mk_hmm_obs_lik_vec(engine, ev);
+[f.alpha, lik] = normalise(prior .* f.obslik);
+logscale = log(lik);
+f.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/hmm_2TBN_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/hmm_2TBN_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+function engine = hmm_2TBN_inf_engine(bnet, varargin)
+% HMM_2TBN_INF_ENGINE Inference engine for DBNs which uses the forwards-backwards algorithm.
+% engine = hmm_2TBN_inf_engine(bnet, ...)
+%
+% The DBN is converted to an HMM with a single meganode, but the observed nodes remain factored.
+% This can be faster than jtree if the num. hidden nodes is low, because of lower constant factors.
+%
+% All hidden nodes must be discrete.
+% All observed nodes are assumed to be leaves.
+% The parents of each observed leaf are assumed to be a subset of the hidden nodes within the same slice.
+% The only exception is if bnet is an AR-HMM, where the parents are assumed to be self in the
+% previous slice (continuous), plus all the discrete nodes in the current slice.
+
+
+%% Optional arguments
+%% ndx_type - 'B', 'D', or 'SD', used in marginal_family [ 'SD' ]
+
+ndx_type = 'SD';
+ss = bnet.nnodes_per_slice;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ %case 'ndx_type', ndx_type = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+% Stuff to do with speeding up marginal_family
+%engine.ndx_type = ndx_type;
+
+[int, engine.persist, engine.transient] = compute_interface_nodes(bnet.intra, bnet.inter);
+engine.persist_bitv = zeros(1, ss);
+engine.persist_bitv(engine.persist) = 1;
+
+
+ns = bnet.node_sizes(:);
+ns(bnet.observed) = 1;
+ns(bnet.observed+ss) = 1;
+engine.eff_node_sizes = ns;
+
+% for n=1:ss
+% dom = 1:(2*ss); % domain of xi(:,:,1)
+% fam = family(bnet.dag, n+ss);
+% engine.marg_fam2_ndx_id(n) = add_ndx(dom, fam, ns, ndx_type);
+
+% dom = 1:ss; % domain of gamma(:,:,1)
+% fam = family(bnet.dag, n);
+% engine.marg_fam1_ndx_id(n) = add_ndx(dom, fam, ns, ndx_type);
+
+% engine.marg_singleton_ndx_id(n) = add_ndx(dom, n, ns, ndx_type);
+% end
+
+for o=bnet.observed(:)'
+ %if bnet.equiv_class(o,1) ~= bnet.equiv_class(o,2)
+ % error(['observed node ' num2str(o) ' is not tied'])
+ %end
+ cs = children(bnet.dag, o);
+ if ~isempty(cs)
+ error(['observed node ' num2str(o) ' is not allowed children'])
+ end
+end
+
+[engine.startprob, engine.transprob, engine.obsprob] = dbn_to_hmm(bnet);
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.one_slice_marginal = [];
+engine.two_slice_marginal = [];
+
+ss = length(bnet.intra);
+engine.maximize = [];
+engine.evidence = [];
+engine.node_sizes = [];
+
+% avoid the need to do bnet_from_engine, which is slow
+engine.slice_size = ss;
+engine.parents = bnet.parents;
+
+engine.bel = [];
+engine = class(engine, 'hmm_2TBN_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function marginal = marginal_family(engine, b, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (hmm_2TBN)
+% marginal = marginal_family(engine, b, i, t, add_ev)
+
+ns = engine.eff_node_sizes(:);
+ss = engine.slice_size;
+
+if t==1 % | ~engine.persist_bitv(i)
+ bigT = b.gamma;
+ ps = engine.parents{i};
+ dom = [ps i];
+ %id = engine.marg_fam1_ndx_id(i);
+ bigdom = 1:ss;
+ bigsz = ns(bigdom);
+ bigdom = bigdom + (t-1)*ss;
+else % some parents are in previous slice
+ bigT = b.xi; % (t-1,t)
+ ps = engine.parents{i+ss};
+ dom = [ps i+ss] + (t-2)*ss;
+ %id = engine.marg_fam2_ndx_id(i);
+ bigdom = 1:(2*ss); % domain of xi(:,:,t)
+ bigsz = ns(bigdom);
+ bigdom = bigdom + (t-2)*ss;
+end
+marginal.domain = dom;
+
+%ndx = get_ndx(id, engine.ndx_type);
+%marginal.T = marg_table_ndx(bigT, engine.maximize, ndx, engine.ndx_type);
+%global SD_NDX
+%ndx = SD_NDX{id};
+%marginal.T = marg_table_ndxSD(bigT, engine.maximize, ndx);
+marginal.T = marg_table(bigT, bigdom, bigsz, dom, engine.maximize);
+
+assert(~add_ev);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function marginal = marginal_nodes(engine, b, nodes, t, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified nodes (hmm_2TBN)
+% marginal = marginal_nodes(engine, b, nodes, t, add_ev)
+%
+% nodes must be a singleton set
+
+assert(length(nodes)==1)
+ss = engine.slice_size;
+
+i = nodes(1);
+bigT = b.gamma;
+dom = i + (t-1)*ss;
+
+%id = engine.marg_singleton_ndx_id(i);
+%global SD_NDX
+%ndx = SD_NDX{id};
+%marginal.T = marg_table_ndxSD(bigT, engine.maximize, ndx);
+
+ns = engine.eff_node_sizes(:);
+bigdom = 1:ss;
+marginal.T = marg_table(bigT, bigdom + (t-1)*ss, ns(bigdom), dom, engine.maximize);
+
+marginal.domain = dom;
+assert(~add_ev);
+%if add_ev
+% marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
+%end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/mk_hmm_obs_lik_vec.m/1.1.1.1/Sun May 4 21:47:44 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@hmm_2TBN_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/mk_hmm_obs_lik_vec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/private/mk_hmm_obs_lik_vec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function obslik = mk_hmm_obs_lik_vec(engine, evidence)
+
+% P(o1,o2| h) = P(o1|h) * P(o2|h) where h = Q1,Q2,...
+
+bnet = bnet_from_engine(engine);
+ss = length(bnet.intra);
+onodes = bnet.observed;
+hnodes = mysetdiff(1:ss, onodes);
+ns = bnet.node_sizes(:);
+ns(onodes) = 1;
+
+Q = length(engine.startprob);
+obslik = ones(Q, 1);
+
+for i=1:length(onodes)
+ o = onodes(i);
+ %data = cell2num(evidence(o,1));
+ data = evidence{o,1};
+ if myismember(o, bnet.dnodes)
+ %obslik_i = eval_pdf_cond_multinomial(data, engine.obsprob{i}.CPT);
+ obslik_i = multinomial_prob(data, engine.obsprob{i}.CPT);
+ else
+ if bnet.auto_regressive(o)
+ error('can''t handle AR nodes')
+ end
+ %% calling mk_ghmm_obs_lik, which calls gaussian_prob, is slow, so we inline it
+ %% and use the pre-computed inverse matrix
+ %obslik_i = mk_ghmm_obs_lik(data, engine.obsprob{i}.mu, engine.obsprob{i}.Sigma);
+ x = data(:);
+ m = engine.obsprob{i}.mu;
+ Qi = size(m, 2);
+ obslik_i = size(Qi, 1);
+ invC = engine.obsprob{i}.inv_Sigma;
+ denom = engine.obsprob{i}.denom;
+ for j=1:Qi
+ numer = exp(-0.5 * (x-m(:,j))' * invC(:,:,j) * (x-m(:,j)));
+ obslik_i(j) = numer / denom(j);
+ end
+ end
+ % convert P(o|ps) into P(o|h) by multiplying onto a (h,o) potential of all 1s
+ ps = bnet.parents{o};
+ dom = [ps o];
+ obspot_i = dpot(dom, ns(dom), obslik_i);
+ dom = [hnodes o];
+ obspot = dpot(dom, ns(dom));
+ obspot = multiply_by_pot(obspot, obspot_i);
+ % compute p(oi|h) * p(oj|h)
+ S = struct(obspot);
+ obslik = obslik .* S.T(:);
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@hmm_2TBN_inf_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (hmm)
+% engine = update_engine(engine, newCPDs)
+
+%engine.inf_engine.bnet.CPD = newCPDs;
+engine.inf_engine = update_engine(engine.inf_engine, newCPDs);
+[engine.startprob, engine.transprob, engine.obsprob] = dbn_to_hmm(bnet_from_engine(engine));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+/back.m/1.1.1.1/Mon Jun 17 23:34:12 2002//
+/back1.m/1.1.1.1/Mon Jun 17 23:34:26 2002//
+/back1_mpe.m/1.1.1.1/Mon Jun 17 23:49:40 2002//
+/backT.m/1.1.1.1/Mon Jun 17 23:34:20 2002//
+/backT_mpe.m/1.1.1.1/Mon Jun 17 23:38:56 2002//
+/back_mpe.m/1.1.1.1/Sun Jul 21 00:32:52 2002//
+/fwd.m/1.1.1.1/Mon Jun 17 23:46:06 2002//
+/fwd1.m/1.1.1.1/Mon Jun 17 23:46:20 2002//
+/jtree_2TBN_inf_engine.m/1.1.1.1/Thu Nov 14 16:31:58 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/set_fields.m/1.1.1.1/Sun Jul 21 01:25:30 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@jtree_2TBN_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/jtree_2TBN_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@jtree_2TBN_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/jtree_2TBN_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/Old/jtree_2TBN_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,116 @@
+function engine = jtree_2TBN_inf_engine(bnet, varargin)
+% JTREE_ONLINE_INF_ENGINE Online Junction tree inference algorithm for DBNs.
+% engine = jtree_online_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% clusters - specifies variables that must be grouped in the 1.5 slice DBN
+% maximize - 1 means do max-product, 0 means sum-product [0]
+%
+% The same nodes must be observed in every slice.
+
+ss = length(bnet.intra);
+clusters = {};
+engine.maximize = 0;
+
+args = varargin;
+nargs = length(args);
+for i=1:2:length(args)
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+engine.evidence = [];
+engine.node_sizes = [];
+
+%int = compute_interface_nodes(bnet.intra, bnet.inter);
+int = [];
+
+if 1
+% include nodes with any outgoing arcs
+for u=1:ss
+ if any(bnet.inter(u,:))
+ int = [int u];
+ end
+end
+end
+
+if 0
+% include nodes with any incoming arcs
+incoming = [];
+for u=1:ss
+ if any(bnet.inter(:,u))
+ int = [int u];
+ incoming = [incoming u];
+ end
+end
+% include nodes which are parents of nodes with incoming
+for u=1:ss
+ cs = children(bnet.intra, u);
+ if ~isempty(cs) & mysubset(cs, incoming)
+ int = [int u];
+ end
+end
+int = unique(int);
+end % if
+
+int
+engine.interface = int;
+engine.nonint = mysetdiff(1:ss, int);
+
+onodes = bnet.observed;
+
+% Create a "1.5 slice" jtree, containing the interface nodes of slice 1
+% and all the nodes of slice 2
+% To keep the node numbering the same, we simply disconnect the non-interface nodes
+% from slice 1, and set their size to 1.
+% We do this to speed things up, and so that the likelihood is computed correctly - we do not need to do
+% this if we just want to compute marginals (i.e., we can include nodes whose potentials will
+% be left as all 1s).
+intra15 = bnet.intra;
+for i=engine.nonint(:)'
+ intra15(:,i) = 0;
+ intra15(i,:) = 0;
+end
+dag15 = [intra15 bnet.inter;
+ zeros(ss) bnet.intra];
+ns = bnet.node_sizes(:);
+%ns(engine.nonint) = 1; % disconnected nodes get size 1
+obs_nodes = [onodes(:) onodes(:)+ss];
+bnet15 = mk_bnet(dag15, ns, 'discrete', bnet.dnodes, 'equiv_class', bnet.equiv_class(:), ...
+ 'observed', obs_nodes(:));
+
+% use unconstrained elimination,
+% but force there to be a clique containing both interfaces
+clusters(end+1:end+2) = {int, int+ss};
+engine.jtree_engine = jtree_inf_engine(bnet15, 'clusters', clusters, 'root', int+ss);
+jtree_engine = struct(engine.jtree_engine); % violate object privacy
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+engine.clq_ass_to_node = jtree_engine.clq_ass_to_node;
+engine.root = jtree_engine.root_clq;
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes,1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+engine.jtree_engine1 = jtree_inf_engine(bnet1, 'clusters', {int}, 'root', int);
+jtree_engine1 = struct(engine.jtree_engine1); % violate object privacy
+engine.int_clq1 = clq_containing_nodes(engine.jtree_engine1, int);
+engine.clq_ass_to_node1 = jtree_engine1.clq_ass_to_node;
+engine.root1 = jtree_engine1.root_clq;
+
+engine.observed = [onodes onodes+ss];
+engine.observed1 = onodes;
+engine.pot_type = determine_pot_type(bnet, onodes);
+engine.slice_size = bnet.nnodes_per_slice;
+
+engine = class(engine, 'jtree_2TBN_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function b = back(engine, bfuture, f, t)
+
+if f.t ~= t
+ error('mixed up time stamps')
+end
+if t==1
+ b = back1(engine, bfuture, f, t);
+ return;
+end
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+int = engine.interface;
+D = engine.in_clq;
+C = engine.out_clq;
+phiD = marginalize_pot(bfuture.clpot{D}, int, engine.maximize);
+phiD = set_domain_pot(phiD, int+ss); % shift to slice 2
+phiC = marginalize_pot(f.clpot{C}, int+ss, engine.maximize);
+ratio = divide_by_pot(phiD, phiC);
+f.clpot{C} = multiply_by_pot(f.clpot{C}, ratio);
+
+[b.clpot, seppot] = distribute_evidence(engine.jtree_engine, f.clpot, f.seppot);
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function b = back1(engine, bfuture, f, t)
+
+if t ~= 1
+ error('mixed up time stamps')
+end
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+int = engine.interface;
+D = engine.in_clq; % from J2
+C = engine.int_clq1; % from J1
+phiD = marginalize_pot(bfuture.clpot{D}, int, engine.maximize);
+phiC = marginalize_pot(f.clpot{C}, int, engine.maximize);
+ratio = divide_by_pot(phiD, phiC);
+f.clpot{C} = multiply_by_pot(f.clpot{C}, ratio);
+
+[b.clpot, seppot] = distribute_evidence(engine.jtree_engine1, f.clpot, f.seppot);
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back1_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back1_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [b, mpe] = back1_mpe(engine, bfuture, f, ev1, t)
+
+if t ~= 1
+ error('mixed up time stamps')
+end
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+maximize = 1;
+
+int = engine.interface;
+D = engine.in_clq; % from J2
+C = engine.int_clq1; % from J1
+phiD = marginalize_pot(bfuture.clpot{D}, int, maximize);
+phiC = marginalize_pot(f.clpot{C}, int, maximize);
+ratio = divide_by_pot(phiD, phiC);
+f.clpot{C} = multiply_by_pot(f.clpot{C}, ratio);
+
+[mpe, b.clpot] = find_max_config(engine.jtree_engine1, f.clpot, f.seppot, ev1);
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/backT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/backT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function b = backT(engine, f, t)
+
+if t==1
+ [b.clpot, seppot] = distribute_evidence(engine.jtree_engine1, f.clpot, f.seppot);
+else
+ [b.clpot, seppot] = distribute_evidence(engine.jtree_engine, f.clpot, f.seppot);
+end
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/backT_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/backT_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function [b, mpe] = backT_mpe(engine, f, ev2, t)
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+if t==1
+ % ev2 is just the evidence on slice 1
+ [mpe, b.clpot] = find_max_config(engine.jtree_engine1, f.clpot, f.seppot, ev2);
+else
+ [mpe, b.clpot] = find_max_config(engine.jtree_engine, f.clpot, f.seppot, ev2);
+ mpe = mpe((1:ss)+ss); % extract values for slice 2
+end
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/back_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function [b, mpe] = back_mpe(engine, bfuture, f, ev2, t)
+
+if f.t ~= t
+ error('mixed up time stamps')
+end
+if t==1
+ error('should call back1_mpe')
+end
+
+maximize = 1;
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+int = engine.interface;
+D = engine.in_clq;
+C = engine.out_clq;
+phiD = marginalize_pot(bfuture.clpot{D}, int, maximize);
+phiD = set_domain_pot(phiD, int+ss); % shift to slice 2
+phiC = marginalize_pot(f.clpot{C}, int+ss, maximize);
+ratio = divide_by_pot(phiD, phiC);
+f.clpot{C} = multiply_by_pot(f.clpot{C}, ratio);
+
+[mpe, b.clpot] = find_max_config(engine.jtree_engine, f.clpot, f.seppot, ev2);
+mpe = mpe((1:ss)+ss); % extract values for slice 2
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/fwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/fwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function [f, logscale] = fwd(engine, fpast, ev, t)
+% Forwards pass.
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+ev2 = cell(ss, 2);
+ev2(:,1) = fpast.evidence;
+ev2(:,2) = ev;
+
+CPDpot = cell(1,ss);
+for n=1:ss
+ fam = family(bnet.dag, n, 2);
+ e = bnet.equiv_class(n, 2);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), ev2);
+end
+f.evidence = ev;
+f.t = t;
+
+% get prior
+int = engine.interface;
+if fpast.t==1
+ prior = marginalize_pot(fpast.clpot{engine.int_clq1}, int, engine.maximize);
+else
+ prior = marginalize_pot(fpast.clpot{engine.out_clq}, int+ss, engine.maximize);
+ prior = set_domain_pot(prior, int); % shift back to slice 1
+end
+
+pots = [ {prior} CPDpot ];
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+CPDclqs = engine.clq_ass_to_node(slice2);
+D = engine.in_clq;
+clqs = [D CPDclqs];
+
+[f.clpot, f.seppot] = init_pot(engine.jtree_engine, clqs, pots, engine.pot_type, engine.observed);
+[f.clpot, f.seppot] = collect_evidence(engine.jtree_engine, f.clpot, f.seppot);
+for c=1:length(f.clpot)
+ [f.clpot{c}, ll(c)] = normalize_pot(f.clpot{c});
+end
+logscale = ll(engine.root);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/fwd1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/fwd1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function [f, logscale] = fwd1(engine, ev, t)
+% Forwards pass for slice 1.
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+CPDpot = cell(1,ss);
+for n=1:ss
+ fam = family(bnet.dag, n, 1);
+ e = bnet.equiv_class(n, 1);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), ev);
+end
+f.t = t;
+f.evidence = ev;
+
+pots = CPDpot;
+slice1 = 1:ss;
+CPDclqs = engine.clq_ass_to_node1(slice1);
+
+[f.clpot, f.seppot] = init_pot(engine.jtree_engine1, CPDclqs, CPDpot, engine.pot_type, engine.observed1);
+[f.clpot, f.seppot] = collect_evidence(engine.jtree_engine1, f.clpot, f.seppot);
+for c=1:length(f.clpot)
+ [f.clpot{c}, ll(c)] = normalize_pot(f.clpot{c});
+end
+logscale = ll(engine.root1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/jtree_2TBN_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/jtree_2TBN_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,69 @@
+function engine = jtree_2TBN_inf_engine(bnet, varargin)
+% JTREE_ONLINE_INF_ENGINE Online Junction tree inference algorithm for DBNs.
+% engine = jtree_online_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% clusters - specifies variables that must be grouped in the 1.5 slice DBN
+%
+% The same nodes must be observed in every slice.
+%
+% This uses the forwards interface of slice t-1 plus all of slice t.
+% By contrast, jtree_dbn uses all of slice t-1 plus the backwards interface of slice t.
+% See my thesis for details.
+
+
+clusters = {};
+
+args = varargin;
+nargs = length(args);
+for i=1:2:length(args)
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+engine.maximize = 0;
+engine.evidence = [];
+engine.node_sizes = [];
+
+int = compute_fwd_interface(bnet.intra, bnet.inter);
+engine.interface = int;
+ss = length(bnet.intra);
+engine.nonint = mysetdiff(1:ss, int);
+onodes = bnet.observed;
+
+bnet15 = mk_slice_and_half_dbn(bnet, int);
+
+% use unconstrained elimination,
+% but force there to be a clique containing both interfaces
+clusters(end+1:end+2) = {int, int+ss};
+engine.jtree_engine = jtree_inf_engine(bnet15, 'clusters', clusters, 'root', int+ss);
+jtree_engine = struct(engine.jtree_engine); % violate object privacy
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+engine.clq_ass_to_node = jtree_engine.clq_ass_to_node;
+engine.root = jtree_engine.root_clq;
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes,1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+engine.jtree_engine1 = jtree_inf_engine(bnet1, 'clusters', {int}, 'root', int);
+jtree_engine1 = struct(engine.jtree_engine1); % violate object privacy
+engine.int_clq1 = clq_containing_nodes(engine.jtree_engine1, int);
+engine.clq_ass_to_node1 = jtree_engine1.clq_ass_to_node;
+engine.root1 = jtree_engine1.root_clq;
+
+engine.observed = [onodes onodes+ss];
+engine.observed1 = onodes;
+engine.pot_type = determine_pot_type(bnet, onodes);
+engine.slice_size = bnet.nnodes_per_slice;
+
+engine = class(engine, 'jtree_2TBN_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function m = marginal_family(engine, b, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_2TBN)
+% marginal = marginal_family(engine, b, i, t, add_ev)
+
+bnet = bnet_from_engine(engine);
+if t==1
+ m = marginal_nodes(engine, b, family(bnet.dag, i), t, add_ev, 1);
+else
+ ss = length(bnet.intra);
+ fam = family(bnet.dag, i+ss);
+ m = marginal_nodes(engine, b, fam, t, add_ev, 1);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function marginal = marginal_nodes(engine, b, nodes, t, add_ev, is_fam)
+% function marginal = marginal_nodes(engine, b, nodes, t, add_ev, is_fam) (jtree_2TBN)
+
+if nargin < 6, is_fam = 0; end
+ss = engine.slice_size;
+
+if ~is_fam & (t > 1) & all(nodes<=ss)
+ nodes = nodes + ss;
+end
+
+if t==1
+ c = clq_containing_nodes(engine.jtree_engine1, nodes, is_fam);
+else
+ c = clq_containing_nodes(engine.jtree_engine, nodes, is_fam);
+end
+if c == -1
+ error(['no clique contains ' nodes])
+end
+bigpot = b.clpot{c};
+pot = marginalize_pot(bigpot, nodes, engine.maximize);
+marginal = pot_to_marginal(pot);
+
+% we convert the domain to the unrolled numbering system
+% so that add_ev_to_dmarginal (maybe called in update_ess) extracts the right evidence.
+if t > 1
+ marginal.domain = nodes+(t-2)*engine.slice_size;
+end
+assert(~add_ev);
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_2TBN_inf_engine/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function engine = set_fields(engine, varargin)
+% SET_FIELDS Set the fields for a generic engine
+% engine = set_fields(engine, name/value pairs)
+%
+% e.g., engine = set_fields(engine, 'maximize', 1)
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'maximize',
+ engine.maximize = args{i+1};
+ engine.jtree_engine = set_fields(engine.jtree_engine, 'maximize', args{i+1});
+ engine.jtree_engine1 = set_fields(engine.jtree_engine1, 'maximize', args{i+1});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+/back.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/back1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/backT.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/fwd.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/fwd1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_sparse_2TBN_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@jtree_sparse_2TBN_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/back.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/back.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function b = back(engine, bfuture, f, t)
+
+if f.t ~= t
+ error('mixed up time stamps')
+end
+if t==1
+ b = back1(engine, bfuture, f, t);
+ return;
+end
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+int = engine.interface;
+D = engine.in_clq;
+C = engine.out_clq;
+phiD = marginalize_pot(bfuture.clpot{D}, int, engine.maximize);
+phiD = set_domain_pot(phiD, int+ss); % shift to slice 2
+phiC = marginalize_pot(f.clpot{C}, int+ss, engine.maximize);
+ratio = divide_by_pot(phiD, phiC);
+f.clpot{C} = multiply_by_pot(f.clpot{C}, ratio);
+
+[b.clpot, seppot] = distribute_evidence(engine.jtree_engine, f.clpot, f.seppot);
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/back1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/back1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function b = back1(engine, bfuture, f, t)
+
+if t ~= 1
+ error('mixed up time stamps')
+end
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+int = engine.interface;
+D = engine.in_clq; % from J2
+C = engine.int_clq1; % from J1
+phiD = marginalize_pot(bfuture.clpot{D}, int, engine.maximize);
+phiC = marginalize_pot(f.clpot{C}, int, engine.maximize);
+ratio = divide_by_pot(phiD, phiC);
+f.clpot{C} = multiply_by_pot(f.clpot{C}, ratio);
+
+[b.clpot, seppot] = distribute_evidence(engine.jtree_engine1, f.clpot, f.seppot);
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/backT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/backT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function b = backT(engine, f, t)
+
+if t==1
+ [b.clpot, seppot] = distribute_evidence(engine.jtree_engine1, f.clpot, f.seppot);
+else
+ [b.clpot, seppot] = distribute_evidence(engine.jtree_engine, f.clpot, f.seppot);
+end
+for c=1:length(b.clpot)
+ [b.clpot{c}, ll(c)] = normalize_pot(b.clpot{c});
+end
+b.t = t;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree_online)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+%
+
+engine.maximize = 0;
+args = varargin;
+for i=1:2:length(args)
+ switch args{i}
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+[engine, loglik] = offline_smoother(engine, evidence);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/fwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/fwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+function [f, logscale] = fwd(engine, fpast, ev, t)
+% Forwards pass.
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+ev2 = cell(ss, 2);
+ev2(:,1) = fpast.evidence;
+ev2(:,2) = ev;
+CPDpot = cell(1,ss);
+for n=1:ss
+ fam = family(bnet.dag, n, 2);
+ e = bnet.equiv_class(n, 2);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), ev2);
+end
+f.evidence = ev;
+f.t = t;
+
+% get prior
+int = engine.interface;
+if fpast.t==1
+ prior = marginalize_pot(fpast.clpot{engine.int_clq1}, int, engine.maximize);
+else
+ prior = marginalize_pot(fpast.clpot{engine.out_clq}, int+ss, engine.maximize);
+ prior = set_domain_pot(prior, int); % shift back to slice 1
+end
+
+pots = [ {prior} CPDpot ];
+slice1 = 1:ss;
+slice2 = slice1 + ss;
+CPDclqs = engine.clq_ass_to_node(slice2);
+D = engine.in_clq;
+clqs = [D CPDclqs];
+
+[f.clpot, f.seppot] = init_pot(engine.jtree_engine, clqs, pots, engine.pot_type, engine.observed);
+[f.clpot, f.seppot] = collect_evidence(engine.jtree_engine, f.clpot, f.seppot);
+for c=1:length(f.clpot)
+ if isa(f.clpot{c}, 'struct')
+ domain = f.clpot{c}.domain;
+ sizes = f.clpot{c}.sizes;
+ T = f.clpot{c}.T;
+ f.clpot{c} = dpot(domain, sizes, T);
+ end
+ [f.clpot{c}, ll(c)] = normalize_pot(f.clpot{c});
+end
+logscale = ll(engine.root);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/fwd1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/fwd1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function [f, logscale] = fwd1(engine, ev, t)
+% Forwards pass for slice 1.
+
+bnet = bnet_from_engine(engine);
+ss = bnet.nnodes_per_slice;
+
+CPDpot = cell(1,ss);
+for n=1:ss
+ fam = family(bnet.dag, n, 1);
+ e = bnet.equiv_class(n, 1);
+ CPDpot{n} = convert_to_pot(bnet.CPD{e}, engine.pot_type, fam(:), ev);
+end
+f.evidence = ev;
+f.t = t;
+
+pots = CPDpot;
+slice1 = 1:ss;
+CPDclqs = engine.clq_ass_to_node1(slice1);
+
+[f.clpot, f.seppot] = init_pot(engine.jtree_engine1, CPDclqs, CPDpot, engine.pot_type, engine.observed1);
+[f.clpot, f.seppot] = collect_evidence(engine.jtree_engine1, f.clpot, f.seppot);
+for c=1:length(f.clpot)
+ if isa(f.clpot{c}, 'struct')
+ domain = f.clpot{c}.domain;
+ sizes = f.clpot{c}.sizes;
+ T = f.clpot{c}.T;
+ f.clpot{c} = dpot(domain, sizes, T);
+ end
+ [f.clpot{c}, ll(c)] = normalize_pot(f.clpot{c});
+end
+logscale = ll(engine.root1);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/jtree_sparse_2TBN_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/jtree_sparse_2TBN_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,95 @@
+function engine = jtree_sparse_2TBN_inf_engine(bnet, varargin)
+% JTREE_SPARSE_2TBN_INF_ENGINE Online sparse junction tree inference algorithm for DBNs.
+% engine = jtree_sparse_2TBN_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% clusters - specifies variables that must be grouped in the 1.5 slice DBN
+% maximize - 1 means do max-product, 0 means sum-product [0]
+%
+% The same nodes must be observed in every slice.
+
+ss = length(bnet.intra);
+clusters = {};
+engine.maximize = 0;
+
+args = varargin;
+nargs = length(args);
+for i=1:2:length(args)
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise, error(['unrecognized argument ' args{i}])
+ end
+end
+
+engine.evidence = [];
+engine.node_sizes = [];
+
+int = [];
+% include nodes with any outgoing arcs
+for u=1:ss
+ if any(bnet.inter(u,:))
+ int = [int u];
+ end
+end
+
+engine.interface = int;
+engine.nonint = mysetdiff(1:ss, int);
+
+onodes = bnet.observed;
+
+% Create a "1.5 slice" jtree, containing the interface nodes of slice 1
+% and all the nodes of slice 2
+% To keep the node numbering the same, we simply disconnect the non-interface nodes
+% from slice 1, and set their size to 1.
+% We do this to speed things up, and so that the likelihood is computed correctly - we do not need to do
+% this if we just want to compute marginals (i.e., we can include nodes whose potentials will
+% be left as all 1s).
+intra15 = bnet.intra;
+for i=engine.nonint(:)'
+ intra15(:,i) = 0;
+ intra15(i,:) = 0;
+ assert(~any(bnet.inter(i,:)))
+end
+dag15 = [intra15 bnet.inter;
+ zeros(ss) bnet.intra];
+ns = bnet.node_sizes(:);
+ns(engine.nonint) = 1; % disconnected nodes get size 1
+obs_nodes = [onodes(:) onodes(:)+ss];
+bnet15 = mk_bnet(dag15, ns, 'discrete', bnet.dnodes, 'equiv_class', bnet.equiv_class(:), ...
+ 'observed', obs_nodes(:));
+
+% use unconstrained elimination,
+% but force there to be a clique containing both interfaces
+clusters(end+1:end+2) = {int, int+ss};
+%engine.jtree_engine = jtree_inf_engine(bnet15, 'clusters', clusters, 'root', int+ss);
+engine.jtree_engine = jtree_sparse_inf_engine(bnet15, 'clusters', clusters, 'root', int+ss);
+jtree_engine = struct(engine.jtree_engine); % violate object privacy
+
+engine.in_clq = clq_containing_nodes(engine.jtree_engine, int);
+engine.out_clq = clq_containing_nodes(engine.jtree_engine, int+ss);
+engine.clq_ass_to_node = jtree_engine.clq_ass_to_node;
+engine.root = jtree_engine.root_clq;
+
+% Also create an engine just for slice 1
+bnet1 = mk_bnet(bnet.intra1, bnet.node_sizes_slice, 'discrete', myintersect(bnet.dnodes,1:ss), ...
+ 'equiv_class', bnet.equiv_class(:,1), 'observed', onodes);
+for i=1:max(bnet1.equiv_class)
+ bnet1.CPD{i} = bnet.CPD{i};
+end
+%engine.jtree_engine1 = jtree_inf_engine(bnet1, 'clusters', {int}, 'root', int);
+engine.jtree_engine1 = jtree_sparse_inf_engine(bnet1, 'clusters', {int}, 'root', int);
+jtree_engine1 = struct(engine.jtree_engine1); % violate object privacy
+engine.int_clq1 = clq_containing_nodes(engine.jtree_engine1, int);
+engine.clq_ass_to_node1 = jtree_engine1.clq_ass_to_node;
+engine.root1 = jtree_engine1.root_clq;
+
+engine.observed = [onodes onodes+ss];
+engine.observed1 = onodes;
+engine.pot_type = determine_pot_type(bnet, onodes);
+engine.slice_size = bnet.nnodes_per_slice;
+
+engine = class(engine, 'jtree_sparse_2TBN_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function m = marginal_family(engine, b, i, t, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree_2TBN)
+% marginal = marginal_family(engine, b, i, t, add_ev)
+
+bnet = bnet_from_engine(engine);
+if t==1
+ m = marginal_nodes(engine, b, family(bnet.dag, i), t, add_ev, 1);
+else
+ ss = length(bnet.intra);
+ fam = family(bnet.dag, i+ss);
+ m = marginal_nodes(engine, b, fam, t, add_ev, 1);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function marginal = marginal_nodes(engine, b, nodes, t, add_ev, is_fam)
+% function marginal = marginal_nodes(engine, b, nodes, t, add_ev, is_fam) (jtree_2TBN)
+
+if nargin < 6, is_fam = 0; end
+ss = engine.slice_size;
+
+if ~is_fam & (t > 1) & all(nodes<=ss)
+ nodes = nodes + ss;
+end
+
+if t==1
+ c = clq_containing_nodes(engine.jtree_engine1, nodes, is_fam);
+else
+ c = clq_containing_nodes(engine.jtree_engine, nodes, is_fam);
+end
+if c == -1
+ error(['no clique contains ' nodes])
+end
+bigpot = b.clpot{c};
+pot = marginalize_pot(bigpot, nodes, engine.maximize);
+marginal = pot_to_marginal(pot);
+
+% we convert the domain to the unrolled numbering system
+% so that add_ev_to_dmarginal (maybe called in update_ess) extracts the right evidence.
+if t > 1
+ marginal.domain = nodes+(t-2)*engine.slice_size;
+end
+assert(~add_ev);
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+/bnet_from_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Mon Jun 17 23:46:46 2002//
+/find_mpe.m/1.1.1.1/Mon Jun 17 23:50:16 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/smoother_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/update_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online/@smoother_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/bnet_from_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/bnet_from_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function bnet = bnet_from_engine(engine)
+% BNET_FROM_ENGINE Return the bnet structure stored inside the engine (smoother_engine)
+% bnet = bnet_from_engine(engine)
+
+bnet = bnet_from_engine(engine.tbn_engine);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function [engine, LL] = enter_evidence(engine, ev)
+% ENTER_EVIDENCE Call the offline smoother
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+
+T = size(ev, 2);
+f = cell(1,T);
+b = cell(1,T); % b{t}.clpot{c}
+ll = zeros(1,T);
+[f{1}, ll(1)] = fwd1(engine.tbn_engine, ev(:,1), 1);
+for t=2:T
+ [f{t}, ll(t)] = fwd(engine.tbn_engine, f{t-1}, ev(:,t), t);
+end
+LL = sum(ll);
+b{T} = backT(engine.tbn_engine, f{T}, T);
+for t=T-1:-1:1
+ b{t} = back(engine.tbn_engine, b{t+1}, f{t}, t);
+end
+engine.b = b;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
+function mpe = find_mpe(engine, ev)
+% FIND_MPE Find the most probable explanation (Viterbi)
+% mpe = find_mpe(engine, evidence, ...)
+%
+% evidence{i,t} = [] if X(i,t) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+
+mpe = cell(size(ev));
+engine.tbn_engine = set_fields(engine.tbn_engine, 'maximize', 1);
+
+T = size(ev, 2);
+f = cell(1,T);
+b = cell(1,T); % b{t}.clpot{c}
+ll = zeros(1,T);
+[f{1}, ll(1)] = fwd1(engine.tbn_engine, ev(:,1), 1);
+for t=2:T
+ [f{t}, ll(t)] = fwd(engine.tbn_engine, f{t-1}, ev(:,t), t);
+end
+
+if T==1
+ [b{1}, mpe(:,1)] = backT_mpe(engine.tbn_engine, f{1}, ev(:,1), 1);
+else
+ [b{T}, mpe(:,T)] = backT_mpe(engine.tbn_engine, f{T}, ev(:,T-1:T), T);
+ for t=T-1:-1:2
+ [b{t}, mpe(:,t)] = back_mpe(engine.tbn_engine, b{t+1}, f{t}, ev(:,t-1:t), t);
+ end
+ t = 1;
+ [b{t}, mpe(:,t)] = back1_mpe(engine.tbn_engine, b{t+1}, f{t}, ev(:,1), t);
+end
+engine.b = b;
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function marginal = marginal_family(engine, i, t, add_ev)
+% MARGINAL_FAMILY Compute the joint distribution on a set of family (smoother_engine)
+% function marginal = marginal_family(engine, i, t, add_ev)
+
+if nargin < 4, add_ev = 0; end
+marginal = marginal_family(engine.tbn_engine, engine.b{t}, i, t, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function marginal = marginal_nodes(engine, nodes, t, add_ev)
+% MARGINAL_NODES Compute the joint distribution on a set of nodes (smoother_engine)
+% function marginal = marginal_nodes(engine, nodes, t, add_ev)
+
+if nargin < 4, add_ev = 0; end
+
+marginal = marginal_nodes(engine.tbn_engine, engine.b{t}, nodes, t, add_ev);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/smoother_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/smoother_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function engine = smoother_engine(tbn_engine)
+% SMOOTHER_ENGINE Create an engine which does offline (fixed-interval) smoothing in O(T) space/time
+% function engine = smoother_engine(tbn_engine)
+%
+% tbn_engine is any 2TBN inference engine which supports the following methods:
+% fwd, fwd1, back, backT, marginal_nodes and marginal_family.
+
+engine.tbn_engine = tbn_engine;
+engine.b = []; % space to store smoothed messages
+engine = class(engine, 'smoother_engine');
+%engine = class(engine, 'smoother_engine', inf_engine(bnet_from_engine(tbn_engine)));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/update_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/@smoother_engine/update_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function engine = update_engine(engine, newCPDs)
+% UPDATE_ENGINE Update the engine to take into account the new parameters (smoother_engine).
+% engine = update_engine(engine, newCPDs)
+
+engine.tbn_engine = update_engine(engine.tbn_engine, newCPDs);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/dummy/1.1.1.1/Sat Jan 18 22:22:38 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+A D/@filter_engine////
+A D/@hmm_2TBN_inf_engine////
+A D/@jtree_2TBN_inf_engine////
+A D/@jtree_sparse_2TBN_inf_engine////
+A D/@smoother_engine////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/online
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/online/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/belprop_fg_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/find_mpe.m/1.1.1.1/Thu Jun 20 00:02:12 2002//
+/loopy_converged.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/set_params.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@belprop_fg_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/belprop_fg_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/belprop_fg_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+function engine = belprop_fg_inf_engine(fg, varargin)
+% BELPROP_FG_INF_ENGINE Make a belief propagation inference engine for factor graphs
+% engine = belprop_fg_inf_engine(factor_graph, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default in brackets]
+% e.g., engine = belprop_inf_engine(fg, 'tol', 1e-2, 'max_iter', 10)
+%
+% max_iter - max. num. iterations [ 2*num_nodes ]
+% momentum - weight assigned to old message in convex combination (useful for damping oscillations) [0]
+% tol - tolerance used to assess convergence [1e-3]
+% maximize - 1 means use max-product, 0 means use sum-product [0]
+%
+% This uses potential objects, like belprop_inf_engine, and hence is quite slow.
+
+engine = init_fields;
+engine = class(engine, 'belprop_fg_inf_engine');
+
+% set params to default values
+N = length(fg.G);
+engine.max_iter = 2*N;
+engine.momentum = 0;
+engine.tol = 1e-3;
+engine.maximize = 0;
+
+% parse optional arguments
+engine = set_params(engine, varargin);
+
+engine.fgraph = fg;
+
+% store results computed by enter_evidence here
+engine.marginal_nodes = cell(1, fg.nvars);
+engine.evidence = [];
+
+
+%%%%%%%%%%%%
+
+function engine = init_fields()
+
+engine.fgraph = [];
+engine.max_iter = [];
+engine.momentum = [];
+engine.tol = [];
+engine.maximize = [];
+engine.marginal_nodes = [];
+engine.evidence = [];
+engine.niter = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,126 @@
+function [engine, ll, niter] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Propagate evidence using belief propagation
+% [engine, ll, niter] = enter_evidence(engine, evidence, ...)
+%
+% The log-likelihood is not computed; ll = 0.
+% niter contains the number of iterations used
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - 1 means use max-product, 0 means use sum-product [0]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+ll = 0;
+maximize = 0;
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+verbose = 0;
+
+ns = engine.fgraph.node_sizes;
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+cnodes = engine.fgraph.cnodes;
+pot_type = determine_pot_type(engine.fgraph, onodes);
+
+% prime each local kernel with evidence (if any)
+nfactors = engine.fgraph.nfactors;
+nvars = engine.fgraph.nvars;
+factors = cell(1,nfactors);
+for f=1:nfactors
+ K = engine.fgraph.factors{engine.fgraph.equiv_class(f)};
+ factors{f} = convert_to_pot(K, pot_type, engine.fgraph.dom{f}(:), evidence);
+end
+
+% initialise msgs
+msg_var_to_fac = cell(nvars, nfactors);
+for x=1:nvars
+ for f=engine.fgraph.dep{x}
+ msg_var_to_fac{x,f} = mk_initial_pot(pot_type, x, ns, cnodes, onodes);
+ end
+end
+msg_fac_to_var = cell(nfactors, nvars);
+dom = cell(1, nfactors);
+for f=1:nfactors
+ %hdom{f} = myintersect(engine.fgraph.dom{f}, hnodes);
+ dom{f} = engine.fgraph.dom{f}(:)';
+ for x=dom{f}
+ msg_fac_to_var{f,x} = mk_initial_pot(pot_type, x, ns, cnodes, onodes);
+ %msg_fac_to_var{f,x} = marginalize_pot(factors{f}, x);
+ end
+end
+
+
+
+converged = 0;
+iter = 1;
+var_prod = cell(1, nvars);
+fac_prod = cell(1, nfactors);
+
+while ~converged & (iter <= engine.max_iter)
+ if verbose, fprintf('iter %d\n', iter); end
+
+ % absorb
+ old_var_prod = var_prod;
+ for x=1:nvars
+ var_prod{x} = mk_initial_pot(pot_type, x, ns, cnodes, onodes);
+ for f=engine.fgraph.dep{x}
+ var_prod{x} = multiply_by_pot(var_prod{x}, msg_fac_to_var{f,x});
+ end
+ end
+ for f=1:nfactors
+ fac_prod{f} = mk_initial_pot(pot_type, dom{f}, ns, cnodes, onodes);
+ for x=dom{f}
+ fac_prod{f} = multiply_by_pot(fac_prod{f}, msg_var_to_fac{x,f});
+ end
+ end
+
+ % send msgs to neighbors
+ old_msg_var_to_fac = msg_var_to_fac;
+ old_msg_fac_to_var = msg_fac_to_var;
+ converged = 1;
+ for x=1:nvars
+ %if verbose, disp(['var ' num2str(x) ' sending to fac ' num2str(engine.fgraph.dep{x})]); end
+ for f=engine.fgraph.dep{x}
+ temp = divide_by_pot(var_prod{x}, old_msg_fac_to_var{f,x});
+ msg_var_to_fac{x,f} = normalize_pot(temp);
+ if ~approxeq_pot(msg_var_to_fac{x,f}, old_msg_var_to_fac{x,f}, engine.tol), converged = 0; end
+ end
+ end
+ for f=1:nfactors
+ %if verbose, disp(['fac ' num2str(f) ' sending to var ' num2str(dom{f})]); end
+ for x=dom{f}
+ temp = divide_by_pot(fac_prod{f}, old_msg_var_to_fac{x,f});
+ temp2 = multiply_by_pot(factors{f}, temp);
+ temp3 = marginalize_pot(temp2, x, maximize);
+ msg_fac_to_var{f,x} = normalize_pot(temp3);
+ if ~approxeq_pot(msg_fac_to_var{f,x}, old_msg_fac_to_var{f,x}, engine.tol), converged = 0; end
+ end
+ end
+
+ if iter==1
+ converged = 0;
+ end
+ iter = iter + 1;
+end
+
+niter = iter - 1;
+engine.niter = niter;
+
+for x=1:nvars
+ engine.marginal_nodes{x} = normalize_pot(var_prod{x});
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function mpe = find_mpe(engine, evidence, varargin)
+% FIND_MPE Find the most probable explanation of the data (belprop_fg)
+% function mpe = find_mpe(engine, evidence,...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% This finds the marginally most likely value for each hidden node,
+% and may give the wrong results even if the graph is acyclic,
+% unless you set break_ties = 1.
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% break_ties is optional. If 1, we will force ties to be broken consistently
+% by calling enter_evidence N times. (see Jensen96, p106) Default = 1.
+
+break_ties = 1;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'break_ties', break_ties = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+engine = enter_evidence(engine, evidence, 'maximize', 1);
+
+observed = ~isemptycell(evidence);
+evidence = evidence(:); % hack to handle unrolled DBNs
+N = length(evidence);
+mpe = cell(1,N);
+for i=1:N
+ m = marginal_nodes(engine, i);
+ % observed nodes are all set to 1 inside the inference engine, so we must undo this
+ if observed(i)
+ mpe{i} = evidence{i};
+ else
+ mpe{i} = argmax(m.T);
+ if break_ties
+ evidence{i} = mpe{i};
+ [engine, ll] = enter_evidence(engine, evidence, 'maximize', 1);
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/loopy_converged.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/loopy_converged.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function niter = loopy_converged(engine)
+% LOOPY_CONVERGED Did loopy belief propagation converge? 0 means no, else we return the num. iterations.
+% function niter = loopy_converged(engine)
+%
+% We use a simple heuristic: we say convergence occurred if the number of iterations
+% used was less than the maximum allowed.
+
+if engine.niter == engine.max_iter
+ niter = 0;
+else
+ niter = engine.niter;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function marginal = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (belprop)
+% marginal = marginal_nodes(engine, query)
+
+assert(length(query)==1);
+marginal = pot_to_marginal(engine.marginal_nodes{query});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/set_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_fg_inf_engine/set_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function engine = set_params(engine, varargin)
+% SET_PARAMS Set the parameters (fields) for a belprop_inf_engine object
+% engine = set_params(engine, name/value pairs)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% e.g., engine = set_params(engine, 'tol', 1e-2, 'max_iter', 10)
+%
+% max_iter - max. num. loopy iterations
+% momentum - weight assigned to old message in convex combination
+% tol - tolerance used to assess convergence
+% maximize - 1 means use max-product, 0 means use sum-product
+
+args = varargin{1};
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'max_iter', engine.max_iter = args{i+1};
+ case 'momentum', engine.momentum = args{i+1};
+ case 'tol', engine.tol = args{i+1};
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/belprop_inf_engine.m/1.1.1.1/Tue Dec 31 19:00:06 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/find_mpe.m/1.1.1.1/Wed Jun 19 22:08:40 2002//
+/loopy_converged.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+A D/Old////
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@belprop_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/belprop_gdl_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/belprop_inf_engine_nostr.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_domain.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@belprop_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/belprop_gdl_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/belprop_gdl_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,67 @@
+function engine = belprop_gdl_inf_engine(gdl, varargin)
+% BELPROP_GDL_INF_ENGINE Make a belief propagation inference engine for a GDL graph
+% engine = belprop_gdl_inf_engine(gdl_graph, ...)
+%
+% If the GDL graph is a tree, this will give exact results.
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default in brackets]
+% e.g., engine = belprop_inf_engine(gdl, 'tol', 1e-2, 'max_iter', 10)
+%
+% protocol - 'tree' means send messages up then down the tree,
+% 'parallel' means use synchronous updates ['parallel']
+% max_iter - max. num. iterations [ 2*num_nodes ]
+% momentum - weight assigned to old message in convex combination (useful for damping oscillations) [0]
+% tol - tolerance used to assess convergence [1e-3]
+% maximize - 1 means use max-product, 0 means use sum-product [0]
+
+
+engine = init_fields;
+engine = class(engine, 'belprop_gdl_inf_engine');
+
+% set default params
+N = length(gdl.G);
+engine.protocol = 'parallel';
+engine.max_iter = 2*N;
+engine.momentum = 0;
+engine.tol = 1e-3;
+engine.maximize = 0;
+
+engine = set_params(engine, varargin);
+
+engine.gdl = gdl;
+
+if strcmp(engine.protocol, 'tree')
+ % Make a rooted tree, so there is a fixed message passing order.
+ root = N;
+ [engine.tree, engine.preorder, engine.postorder, height, cyclic] = mk_rooted_tree(gdl.G, root);
+ assert(~cyclic);
+end
+
+% store results computed by enter_evidence here
+ndoms = length(gdl.doms);
+nvars = length(gdl.vars);
+engine.marginal_domains = cell(1, ndoms);
+
+% to compute the marginal on each variable, we need to know which domain to marginalize
+% and we want to choose the lightest. We compute the weight once we have seen the evidence.
+engine.dom_weight = [];
+engine.evidence = [];
+
+
+%%%%%%%%%
+
+function engine = init_fields()
+
+engine.protocol = [];
+engine.gdl = [];
+engine.max_iter = [];
+engine.momentum = [];
+engine.tol = [];
+engine.maximize = [];
+engine.marginal_domains = [];
+engine.evidence = [];
+engine.tree = [];
+engine.preorder = [];
+engine.postorder = [];
+engine.dom_weight = [];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/belprop_inf_engine_nostr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/belprop_inf_engine_nostr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function engine = belprop_inf_engine(fg, max_iter, momentum, tol, maximize)
+
+if nargin < 2, max_iter = length(fg.G); end
+if nargin < 3, momentum = 0; end
+if nargin < 4, tol = 1e-3; end
+if nargin < 5, maximize = 0; end
+
+engine.fgraph = fg;
+engine.max_iter = max_iter;
+engine.momentum = momentum;
+engine.tol = tol;
+engine.maximize = maximize;
+
+% store results computed by enter_evidence here
+ndoms = length(fg.doms);
+nvars = length(fg.vars);
+engine.marginal_domains = cell(1, ndoms);
+
+% to compute the marginal on each variable, we need to know which domain to marginalize
+% so we represent each domain as a bit vector, and compute its (pre-evidence) weight
+engine.dom_weight = [];
+
+% engine.dom_bitv = sparse(ndoms, nvars);
+% ns = fg.node_sizes;
+% for i=1:ndoms
+% engine.dom_bitv(i, fg.doms{i}) = 1;
+% engine.dom_weight(i) = prod(ns(fg.doms{i}));
+% end
+
+
+engine = class(engine, 'belprop_inf_engine');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,87 @@
+function engine = enter_evidence(engine, evidence)
+
+doms = engine.fg.doms;
+ndoms = length(doms);
+ns = engine.fg.node_sizes;
+obs = find(~isemptycell(evidence));
+cobs = myintersect(obs, engine.fg.cnodes);
+dobs = myintersect(obs, engine.fg.dnodes);
+ns(cobs) = 0;
+ns(dobs) = 1;
+
+% prime each local kernel with evidence (if any)
+local_kernel = cell(1, ndoms);
+for i=1:length(engine.fg.kernels_of_type)
+ u = engine.fg.kernels_of_type{i};
+ local_kernel(u) = kernel_to_dpots(engine.fg.kernels{i}, evidence, engine.fg.domains_of_type{i});
+end
+
+% initialise all msgs to 1s
+nedges = engine.fg.nedges;
+msg = cell(1, nedges);
+for i=1:nedges
+ msg{i} = dpot(engine.fg.sepset{i}, ns(engine.fg.sepset{i}));
+end
+
+prod_of_msg = cell(1, ndoms);
+bel = cell(1, ndoms);
+old_bel = cell(1, ndoms);
+
+converged = 0;
+iter = 1;
+while ~converged & (iter <= engine.max_iter)
+
+ % each node multiplies all its incoming msgs
+ for i=1:ndoms
+ prod_of_msg{i} = dpot(doms{i}, ns(doms{i}));
+ nbrs = engine.fg.nbrs{i};
+ for j=1:length(nbrs)
+ ndx = engine.fg.edge_ndx(j,i);
+ prod_of_msg{i} = multiply_by_pot(prod_of_msg{i}, msg{ndx});
+ end
+ end
+ old_msg = msg;
+
+ % each node computes its local belief
+ old_bel = bel; % save the previous beliefs; otherwise the convergence test always compares against []
+ for i=1:ndoms
+ bel{i} = normalize_pot(multiply_pots(prod_of_msg{i}, local_kernel{i}));
+ end
+
+ % converged?
+ % Compare the underlying tables, since approxeq expects numeric arrays, not pot objects;
+ % skip the test on the first iteration, when old_bel is still empty.
+ if iter==1
+ converged = 0;
+ else
+ converged = 1;
+ for i=1:ndoms
+ if ~approxeq(get_params(bel{i}, 'table'), get_params(old_bel{i}, 'table'), engine.tol)
+ converged = 0;
+ break;
+ end
+ end
+ end
+
+ if ~converged
+ % each node sends a msg to each of its neighbors
+ for i=1:ndoms
+ nbrs = engine.fg.nbrs{i};
+ for j=1:length(nbrs)
+ % multiply all incoming msgs except from j
+ temp = prod_of_msg{i};
+ ndx = engine.fg.edge_ndx(j,i);
+ temp = divide_by_pot(temp, old_msg{ndx});
+ % send msg from i to j
+ temp = multiply_by_pot(temp, local_kernel{i});
+ ndx = engine.fg.edge_ndx(i,j);
+ msg{ndx} = normalize_pot(marginalize_pot(temp, engine.fg.sepset{ndx}));
+ end
+ end
+ end
+
+ iter = iter + 1;
+end
+
+
+engine.marginal = bel;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/enter_evidence1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/enter_evidence1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,94 @@
+function engine = enter_evidence(engine, evidence)
+
+doms = engine.fgraph.doms;
+ndoms = length(doms);
+ns = engine.fgraph.node_sizes;
+obs = find(~isemptycell(evidence));
+cobs = myintersect(obs, engine.fgraph.cnodes);
+dobs = myintersect(obs, engine.fgraph.dnodes);
+ns(cobs) = 0;
+ns(dobs) = 1;
+
+% recompute the weight of each domain now that we know what nodes are observed
+for i=1:ndoms
+ engine.dom_weight(i) = prod(ns(engine.fgraph.doms{i}));
+end
+
+% prime each local kernel with evidence (if any)
+local_kernel = cell(1, ndoms);
+for i=1:length(engine.fgraph.kernels_of_type)
+ u = engine.fgraph.kernels_of_type{i};
+ local_kernel(u) = kernel_to_dpots(engine.fgraph.kernels{i}, evidence, engine.fgraph.domains_of_type{i});
+end
+
+% initialise all msgs to 1s
+msg = cell(ndoms, ndoms);
+for i=1:ndoms
+ nbrs = engine.fgraph.nbrs{i};
+ for j=nbrs(:)'
+ dom = engine.fgraph.sepset{i,j};
+ msg{i,j} = dpot(dom, ns(dom));
+ end
+end
+
+prod_of_msg = cell(1, ndoms);
+bel = cell(1, ndoms);
+old_bel = cell(1, ndoms);
+
+converged = 0;
+iter = 1;
+while ~converged & (iter <= engine.max_iter)
+
+ % each node multiplies all its incoming msgs
+ for i=1:ndoms
+ prod_of_msg{i} = dpot(doms{i}, ns(doms{i}));
+ nbrs = engine.fgraph.nbrs{i};
+ for j=nbrs(:)'
+ prod_of_msg{i} = multiply_by_pot(prod_of_msg{i}, msg{j,i});
+ end
+ end
+
+ % each node computes its local belief
+ old_bel = bel;
+ for i=1:ndoms
+ bel{i} = normalize_pot(multiply_pots(prod_of_msg{i}, local_kernel{i}));
+ end
+
+ % converged?
+ if iter==1
+ converged = 0;
+ else
+ converged = 1;
+ for i=1:ndoms
+ belT = get_params(bel{i}, 'table');
+ old_belT = get_params(old_bel{i}, 'table');
+ if ~approxeq(belT, old_belT, engine.tol)
+ converged = 0;
+ break;
+ end
+ end
+ end
+
+ if ~converged
+ old_msg = msg;
+ % each node sends a msg to each of its neighbors
+ for i=1:ndoms
+ nbrs = engine.fgraph.nbrs{i};
+ for j=nbrs(:)'
+ % multiply all incoming msgs except from j
+ temp = prod_of_msg{i};
+ temp = divide_by_pot(temp, old_msg{j,i});
+ % send msg from i to j
+ temp = multiply_by_pot(temp, local_kernel{i});
+ msg{i,j} = normalize_pot(marginalize_pot(temp, engine.fgraph.sepset{i,j}));
+ end
+ end
+ end
+
+ iter = iter + 1; % semicolon added: without it, iter was echoed to the console on every iteration
+end
+
+engine.marginal_domains = bel;
+%for i=1:ndoms
+ %engine.marginal_domains{i} = get_params(bel{i}, 'table');
+%end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/marginal_domain.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/Old/marginal_domain.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function marginal = marginal_domain(engine, i)
+% MARGINAL_DOMAIN Return the marginal on the specified domain (belprop)
+% marginal = marginal_domain(engine, i)
+
+marginal = pot_to_marginal(engine.marginal_domains{i});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/belprop_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/belprop_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,91 @@
+function engine = belprop_inf_engine(bnet, varargin)
+% BELPROP_INF_ENGINE Make a loopy belief propagation inference engine
+% engine = belprop_inf_engine(bnet, ...)
+%
+% This is like pearl_inf_engine, except it uses potential objects,
+% instead of lambda/pi structs. Hence it is slower.
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default in brackets]
+%
+% protocol - 'tree' means send messages up then down the tree,
+% 'parallel' means use synchronous updates ['parallel']
+% max_iter - max. num. iterations [ 2*num_nodes ]
+% momentum - weight assigned to old message in convex combination (useful for damping oscillations) [0]
+% tol - tolerance used to assess convergence [1e-3]
+% maximize - 1 means use max-product, 0 means use sum-product [0]
+% filename - name of file to write beliefs to after each iteration within enter_evidence [ [] ]
+%
+% e.g., engine = belprop_inf_engine(bnet, 'maximize', 1, 'max_iter', 10)
+
+% gdl = general distributive law
+engine.gdl = bnet_to_gdl(bnet);
+
+% set default params
+N = length(engine.gdl.G);
+engine.protocol = 'parallel';
+engine.max_iter = 2*N;
+engine.momentum = 0;
+engine.tol = 1e-3;
+engine.maximize = 0;
+engine.filename = [];
+engine.fid = [];
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'max_iter', engine.max_iter = args{i+1};
+ case 'momentum', engine.momentum = args{i+1};
+ case 'tol', engine.tol = args{i+1};
+ case 'maximize', engine.maximize = args{i+1}; % documented above (and used in the usage example) but was missing from this switch
+ case 'protocol', engine.protocol = args{i+1};
+ case 'filename', engine.filename = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+
+if strcmp(engine.protocol, 'tree')
+ % Make a rooted tree, so there is a fixed message passing order.
+ root = N;
+ [engine.tree, engine.preorder, engine.postorder, height, cyclic] = mk_rooted_tree(engine.gdl.G, root);
+ assert(~cyclic);
+end
+
+% store results computed by enter_evidence here
+engine.marginal_domains = cell(1, N);
+
+engine.niter = [];
+
+engine = class(engine, 'belprop_inf_engine', inf_engine(bnet));
+
+%%%%%%%%%
+
+function gdl = bnet_to_gdl(bnet)
+
+gdl.G = mk_undirected(bnet.dag);
+N = length(bnet.dag);
+gdl.doms = cell(1,N);
+for i=1:N
+ gdl.doms{i} = family(bnet.dag, i);
+end
+
+% Compute a bit vector representation of the set of domains
+% dom_bitv(i,j) = 1 iff variable j occurs in domain i
+gdl.dom_bitv = zeros(N, N);
+for i=1:N
+ gdl.dom_bitv(i, gdl.doms{i}) = 1;
+end
+
+% compute the intersection of the domains on either side of each edge (separating set)
+gdl.sepset = cell(N, N);
+gdl.nbrs = cell(1,N);
+for i=1:N
+ nbrs = neighbors(gdl.G, i);
+ gdl.nbrs{i} = nbrs;
+ for j = nbrs(:)'
+ gdl.sepset{i,j} = myintersect(gdl.doms{i}, gdl.doms{j});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+function [engine, ll, niter] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Propagate evidence using belief propagation
+% [engine, ll, niter] = enter_evidence(engine, evidence, ...)
+%
+% The log-likelihood is not computed; ll = 0.
+% niter contains the number of iterations used (if engine.protocol = 'parallel')
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - 1 means use max-product, 0 means use sum-product [0]
+% exclude - list of nodes whose potential will not be included in the joint [ [] ]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+
+ll = 0;
+exclude = [];
+maximize = 0;
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'exclude', exclude = args{i+1};
+ case 'maximize', maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+engine.maximize = maximize;
+
+if ~isempty(engine.filename)
+ engine.fid = fopen(engine.filename, 'w');
+ if engine.fid == 0
+ error(['can''t open ' engine.filename]);
+ end
+else
+ engine.fid = [];
+end
+
+gdl = engine.gdl;
+bnet = bnet_from_engine(engine);
+
+ndoms = length(gdl.doms);
+ns = bnet.node_sizes;
+onodes = find(~isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+
+% prime each local kernel with evidence (if any)
+local_kernel = cell(1, ndoms);
+for i=1:ndoms
+ if myismember(i, exclude)
+ local_kernel{i} = mk_initial_pot(pot_type, gdl.doms{i}, ns, bnet.cnodes, onodes);
+ else
+ e = bnet.equiv_class(i);
+ local_kernel{i} = convert_to_pot(bnet.CPD{e}, pot_type, gdl.doms{i}(:), evidence);
+ end
+end
+
+% initialise all msgs to 1s
+msg = cell(ndoms, ndoms);
+for i=1:ndoms
+ nbrs = gdl.nbrs{i};
+ for j=nbrs(:)'
+ dom = gdl.sepset{i,j};
+ msg{i,j} = mk_initial_pot(pot_type, dom, ns, bnet.cnodes, onodes);
+ end
+end
+
+switch engine.protocol
+ case 'parallel',
+ [engine.marginal_domains, niter] = parallel_protocol(engine, evidence, pot_type, local_kernel, msg);
+ case 'tree',
+ engine.marginal_domains = serial_protocol(engine, evidence, pot_type, local_kernel, msg);
+ niter = 1;
+end
+engine.niter = niter;
+
+%fprintf('just finished %d iterations of belprop\n', niter);
+
+if ~isempty(engine.filename)
+ fclose(engine.fid);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function mpe = find_mpe(engine, evidence, varargin)
+% FIND_MPE Find the most probable explanation of the data (belprop)
+% function mpe = find_mpe(engine, evidence,...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% This finds the marginally most likely value for each hidden node,
+% and may give the wrong results even if the graph is acyclic,
+% unless you set break_ties = 1.
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% break_ties is optional. If 1, we will force ties to be broken consistently
+% by calling enter_evidence N times. (see Jensen96, p106) Default = 1.
+
+break_ties = 1;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'break_ties', break_ties = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+engine = enter_evidence(engine, evidence, 'maximize', 1);
+
+observed = ~isemptycell(evidence);
+evidence = evidence(:); % hack to handle unrolled DBNs
+N = length(evidence);
+mpe = cell(1,N);
+for i=1:N
+ m = marginal_nodes(engine, i);
+ % observed nodes are all set to 1 inside the inference engine, so we must undo this
+ if observed(i)
+ mpe{i} = evidence{i};
+ else
+ mpe{i} = argmax(m.T);
+ if break_ties
+ evidence{i} = mpe{i};
+ [engine, ll] = enter_evidence(engine, evidence, 'maximize', 1);
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/loopy_converged.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/loopy_converged.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function niter = loopy_converged(engine)
+% LOOPY_CONVERGED Did loopy belief propagation converge? 0 means no, else we return the num. iterations.
+% function niter = loopy_converged(engine)
+%
+% We use a simple heuristic: we say convergence occurred if the number of iterations
+% used was less than the maximum allowed.
+
+if engine.niter == engine.max_iter
+ niter = 0;
+else
+ niter = engine.niter;
+end
+%conv = (strcmp(engine.protocol, 'tree') | (engine.niter < engine.max_iter));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function [marginal, pot] = marginal_family(engine, query)
+% MARGINAL_NODES Compute the marginal on the family of the specified query node (belprop)
+% [marginal, pot] = marginal_family(engine, query)
+
+pot = engine.marginal_domains{query};
+marginal = pot_to_marginal(pot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function [marginal, pot] = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (belprop)
+% [marginal, pot] = marginal_nodes(engine, query)
+%
+% query must be a subset of a family
+
+if isempty(query)
+ big_pot = engine.marginal_domains{1}; % pick an arbitrary domain
+else
+ big_pot = engine.marginal_domains{query(end)};
+end
+pot = marginalize_pot(big_pot, query);
+marginal = pot_to_marginal(pot);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/junk/1.1.1.1/Wed May 29 15:59:56 2002//
+/parallel_protocol.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/tree_protocol.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@belprop_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/junk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_inf_engine/private/junk Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+fgraph
+fgraph%fgraph%fgraph%fgraph%fgraph%fgraph%fgraph%fgraph%fgraph%fgraph%fgraph%fgraph
+fgraph
+fgraphffgraphufgraphnfgraphcfgraphtfgraphifgraphofgraphnfgraph fgraph[fgraphbfgraphefgraphlfgraph,fgraph fgraphifgraphtfgraphefgraphrfgraph]fgraph fgraph=fgraph fgraphpfgraphafgraphrfgraphafgraphlfgraphlfgraphefgraphlfgraph_fgraphpfgraphrfgraphofgraphtfgraphofgraphcfgraphofgraphlfgraph(fgraphefgraphnfgraphgfgraphifgraphnfgraphefgraph,fgraph fgraphefgraphvfgraphifgraphdfgraphefgraphnfgraphcfgraphefgraph,fgraph fgraphpfgraphofgraphtfgraph_fgraphtfgraphyfgraphpfgraphefgraph,fgraph fgraphlfgraphofgraphcfgraphafgraphlfgraph_fgraphkfgraphefgraphrfgraphnfgraphefgraphlfgraph,fgraph fgraphmfgraphsfgraphgfgraph)fgraph
+fgraph
+fgraphdfgraphofgraphmfgraphsfgraph fgraph=fgraph fgraphefgraphnfgraphgfgraphifgraphnfgraphefgraph.fgraphffgraphgfgraphrfgraphafgraphpfgraphhfgraph.fgraphdfgraphofgraphmfgraphsfgraph;fgraph
+fgraphnfgraphdfgraphofgraphmfgraphsfgraph fgraph=fgraph fgraphlfgraphefgraphnfgraphgfgraphtfgraphhfgraph(fgraphdfgraphofgraphmfgraphsfgraph)fgraph;fgraph
+fgraphnfgraphsfgraph fgraph=fgraph fgraphefgraphnfgraphgfgraphifgraphnfgraphefgraph.fgraphffgraphgfgraphrfgraphafgraphpfgraphhfgraph.fgraphnfgraphofgraphdfgraphefgraph_fgraphsfgraphifgraphzfgraphefgraphsfgraph;fgraph
+fgraphofgraphnfgraphofgraphdfgraphefgraphsfgraph fgraph=fgraph fgraphffgraphifgraphnfgraphdfgraph(fgraph~fgraphifgraphsfgraphefgraphmfgraphpfgraphtfgraphyfgraphcfgraphefgraphlfgraphlfgraph(fgraphefgraphvfgraphifgraphdfgraphefgraphnfgraphcfgraphefgraph)fgraph)fgraph;fgraph
+fgraphcfgraphnfgraphofgraphdfgraphefgraphsfgraph fgraph=fgraph fgraphefgraphnfgraphgfgraphifgraphnfgraphefgraph.fgraphffgraphgfgraphrfgraphafgraphpfgraphhfgraph.fgraphcfgraphnfgraphofgraphdfgraphefgraphsfgraph;fgraph
+fgraph
+fgraphpfgraphrfgraphofgraphdfgraph_fgraphofgraphffgraph_fgraphmfgraphsfgraphgfgraph fgraph=fgraph fgraphcfgraphefgraphlfgraphlfgraph(fgraph1fgraph,fgraph fgraphnfgraphdfgraphofgraphmfgraphsfgraph)fgraph;fgraph
+fgraphbfgraphefgraphlfgraph fgraph=fgraph fgraphcfgraphefgraphlfgraphlfgraph(fgraph1fgraph,fgraph fgraphnfgraphdfgraphofgraphmfgraphsfgraph)fgraph;fgraph
+fgraphofgraphlfgraphdfgraph_fgraphbfgraphefgraphlfgraph fgraph=fgraph fgraphcfgraphefgraphlfgraphlfgraph(fgraph1fgraph,fgraph fgraphnfgraphdfgraphofgraphmfgraphsfgraph)fgraph;fgraph
+fgraph
+fgraphcfgraphofgraphnfgraphvfgraphefgraphrfgraphgfgraphefgraphdfgraph fgraph=fgraph fgraph0fgraph;fgraph
+fgraphifgraphtfgraphefgraphrfgraph fgraph=fgraph fgraph1fgraph;fgraph
+fgraphwfgraphhfgraphifgraphlfgraphefgraph fgraph~fgraphcfgraphofgraphnfgraphvfgraphefgraphrfgraphgfgraphefgraphdfgraph fgraph&fgraph fgraph(fgraphifgraphtfgraphefgraphrfgraph fgraph temp(k)=0, so can replace 0's with anything
+ temp = temp ./ m;
+ temp_div = temp;
+ end
+
+ if 1
+ % Compute temp = product of all incoming msgs except from j in obvious way
+ if use_cell
+ %temp = ones(nstates(i),1);
+ temp = local_evidence{i};
+ for k=nbrs(:)'
+ if k==j, continue, end;
+ temp = temp .* old_msg{edge_id(k,i)};
+ end
+ else
+ %temp = ones(nstates,1);
+ temp = local_evidence(:,i);
+ for k=nbrs(:)'
+ if k==j, continue, end;
+ temp = temp .* old_msg(:, edge_id(k,i));
+ end
+ end
+ end
+ %assert(approxeq(temp, temp_div))
+ assert(approxeq(normalise(pot_ij * temp), normalise(pot_ij * temp_div)))
+
+ if maximize
+ newm = max_mult(pot_ij, temp); % bottleneck
+ else
+ newm = pot_ij * temp;
+ end
+ newm = normalise(newm);
+ if use_cell
+ new_msg{edge_id(i,j)} = newm;
+ else
+ new_msg(:, edge_id(i,j)) = newm;
+ end
+ end % for j
+ end % for i
+ old_prod_of_msgs = prod_of_msgs;
+
+ % each node multiplies all its incoming msgs and computes its local belief
+ if use_cell
+ for i=1:nnodes
+ nbrs = find(adj_mat(:,i));
+ prod_of_msgs{i} = local_evidence{i};
+ for j=nbrs(:)'
+ prod_of_msgs{i} = prod_of_msgs{i} .* new_msg{edge_id(j,i)};
+ end
+ new_bel{i} = normalise(prod_of_msgs{i});
+ end
+ err = abs(cat(1,new_bel{:}) - cat(1, old_bel{:}));
+ else
+ for i=1:nnodes
+ nbrs = find(adj_mat(:,i));
+ prod_of_msgs(:,i) = local_evidence(:,i);
+ for j=nbrs(:)'
+ prod_of_msgs(:,i) = prod_of_msgs(:,i) .* new_msg(:,edge_id(j,i));
+ end
+ new_bel(:,i) = normalise(prod_of_msgs(:,i));
+ end
+ err = abs(new_bel(:) - old_bel(:));
+ end
+ converged = all(err < tol);
+ if verbose, fprintf('error at iter %d = %f\n', iter, sum(err)); end
+ if ~isempty(fn)
+ if isempty(fnargs)
+ feval(fn, new_bel);
+ else
+ feval(fn, new_bel, iter, fnargs{:});
+ end
+ end
+
+ iter = iter + 1;
+ old_msg = new_msg;
+ old_bel = new_bel;
+end % while
+
+niter = iter-1;
+
+fprintf('converged in %d iterations\n', niter);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function [engine, ll, niter] = enter_soft_evidence(engine, local_evidence)
+% ENTER_SOFT_EVIDENCE Propagate evidence using belief propagation
+% [engine, ll, niter] = enter_soft_evidence(engine, local_evidence)
+%
+% local_evidence{i}(j) = Pr(observation at node i | S(i)=j)
+%
+% The log-likelihood is not computed; ll = 0.
+% niter contains the number of iterations used
+
+ll = 0;
+mrf2 = engine.mrf2;
+[bel, niter] = bp_mrf2(mrf2.adj_mat, mrf2.pot, local_evidence, ...
+ 'max_iter', engine.max_iter, 'momentum', engine.momentum, ...
+ 'tol', engine.tol, 'maximize', 0, 'verbose', engine.verbose);
+engine.bel = bel;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function mpe = find_mpe(engine, local_evidence)
+% FIND_MPE Find the most probable explanation of the data
+% function mpe = find_mpe(engine, local_evidence
+%
+% local_evidence{i}(j) = Pr(observation at node i | S(i)=j)
+%
+% This finds the marginally most likely value for each hidden node.
+% It may give inconsistent results if there are ties.
+
+[mpe, niter] = bp_mpe_mrf2(engine.mrf2.adj_mat, engine.mrf2.pot, local_evidence, ...
+ 'max_iter', engine.max_iter, 'momentum', engine.momentum, ...
+ 'tol', engine.tol);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function marginal = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (belprop)
+% marginal = marginal_nodes(engine, query)
+%
+% query must be a single node
+
+if length(query)>1
+ error('can only handle single node marginals')
+end
+marginal = engine.bel{query};
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/set_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@belprop_mrf2_inf_engine/set_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function engine = set_params(engine, varargin)
+% SET_PARAMS Modify parameters of the inference engine
+% engine = set_params(engine, 'param1',val1, 'param2',val2, ...)
+%
+% Parameter names are listed below.
+%
+% max_iter - max. num. iterations
+% momentum - weight assigned to old message in convex combination
+% (useful for damping oscillations)
+% tol - tolerance used to assess convergence
+% verbose - 1 means print error at every iteration [0]
+
+% process_options takes the name/value list as its first argument; it was omitted here
+[engine.max_iter, engine.momentum, engine.tol, engine.verbose] = ...
+ process_options(varargin, 'max_iter', engine.max_iter, 'momentum', engine.momentum, ...
+ 'tol', engine.tol, 'verbose', engine.verbose);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/cond_gauss_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@cond_gauss_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/cond_gauss_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/cond_gauss_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function engine = cond_gauss_inf_engine(bnet)
+% COND_GAUSS_INF_ENGINE Conditional Gaussian inference engine
+% engine = cond_gauss_inf_engine(bnet)
+%
+% Enumerates all the discrete roots, and runs jtree on the remaining Gaussian nodes.
+
+dnodes = mysetdiff(1:length(bnet.dag), bnet.cnodes);
+
+%onodes = dnodes; % all the discrete nodes will be observed
+%engine.sub_engine = jtree_inf_engine(bnet, onodes);
+bnet2 = bnet;
+bnet2.observed = dnodes;
+engine.sub_engine = jtree_inf_engine(bnet2);
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.T = [];
+engine.mu = [];
+engine.Sigma = [];
+engine.joint_dmarginal = [];
+engine.onodes = []; % needed for marginal_nodes
+engine.evidence = []; % needed for marginal_nodes add_ev
+
+engine = class(engine, 'cond_gauss_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (cond_gauss)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector)
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+observed = ~isemptycell(evidence);
+onodes = find(observed);
+hnodes = find(isemptycell(evidence));
+engine.evidence = evidence;
+
+% check there are no C->D links where C is hidden
+pot_type = determine_pot_type(bnet, onodes);
+
+dhid = myintersect(hnodes, bnet.dnodes);
+S = prod(ns(dhid));
+T = zeros(S,1);
+
+N = length(bnet.dag);
+mu = cell(1,N);
+Sigma = cell(1,N);
+cobs = myintersect(bnet.cnodes, onodes);
+chid = myintersect(bnet.cnodes, hnodes);
+ens = ns;
+ens(cobs) = 0;
+for j=chid(:)'
+ mu{j} = zeros(ens(j), S);
+ Sigma{j} = zeros(ens(j), ens(j), S);
+end
+
+for i=1:S
+ dvals = ind2subv(ns(dhid), i);
+ evidence(dhid) = num2cell(dvals);
+ [sub_engine, loglik] = enter_evidence(engine.sub_engine, evidence);
+ for j=chid(:)'
+ m = marginal_nodes(sub_engine, j);
+ mu{j}(:,i) = m.mu;
+ Sigma{j}(:,:,i) = m.Sigma;
+ end
+ T(i) = exp(loglik);
+end
+
+[T, lik] = normalise(T);
+loglik = log(lik);
+
+engine.T = T;
+engine.mu = mu;
+engine.Sigma = Sigma;
+
+dnodes = bnet.dnodes;
+dobs = myintersect(dnodes, onodes);
+ens(dobs) = 1;
+engine.joint_dmarginal = dpot(dnodes, ens(dnodes), myreshape(engine.T, ens(dnodes)));
+
+engine.onodes = onodes;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@cond_gauss_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function marginal = marginal_nodes(engine, query, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (cond_gauss)
+% marginal = marginal_nodes(engine, query, add_ev)
+%
+% 'query' must be a singleton set
+% add_ev is an optional argument; if 1, we will "inflate" the marginal of observed nodes
+% to their original size, adding 0s to the positions which contradict the evidence
+
+if nargin < 3, add_ev = 0; end
+
+if length(query) ~= 1
+ error('cond_gauss_inf_engine can only handle marginal queries on single nodes')
+end
+j = query;
+bnet = bnet_from_engine(engine);
+
+if myismember(j, bnet.cnodes)
+ if ~myismember(j, engine.onodes)
+ [m, C] = collapse_mog(engine.mu{j}, engine.Sigma{j}, engine.T);
+ marginal.mu = m;
+ marginal.Sigma = C;
+ marginal.T = 1.0; % single mixture component
+ else
+ marginal.mu = engine.evidence{j};
+ k = bnet.node_sizes(j);
+ marginal.Sigma = zeros(k,k);
+ marginal.T = 1.0; % since P(E|E)=1
+ end
+else
+ marginal = pot_to_marginal(marginalize_pot(engine.joint_dmarginal, j));
+ if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, bnet.node_sizes);
+ end
+end
+
+marginal.domain = query;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enumerative_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@enumerative_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function [engine, loglik] = enter_evidence(engine, evidence)
+% ENTER_EVIDENCE Add the specified evidence to the network (enumerative_inf)
+% [engine, loglik] = enter_evidence(engine, evidence)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector)
+
+engine.evidence = evidence;
+if nargout == 2
+ [m, loglik] = marginal_nodes(engine, []);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/enumerative_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/enumerative_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function engine = enumerative_inf_engine(bnet)
+% ENUMERATIVE_INF_ENGINE Inference engine for fully discrete BNs that uses exhaustive enumeration.
+% engine = enumerative_inf_engine(bnet)
+
+
+assert(isempty(bnet.cnodes));
+
+% This is where we store stuff between enter_evidence and marginal_nodes
+engine.evidence = [];
+
+engine = class(engine, 'enumerative_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@enumerative_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+function [marginal, loglik] = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (enumerative_inf)
+% [marginal, loglik] = marginal_nodes(engine, query)
+
+
+if isempty(query) & nargout < 2
+ marginal.T = 1;
+ marginal.domain = [];
+ return;
+end
+
+evidence = engine.evidence;
+bnet = bnet_from_engine(engine);
+assert(isempty(bnet.cnodes));
+n = length(bnet.dag);
+observed = ~isemptycell(evidence);
+vals = cat(1,evidence{observed});
+vals = vals(:)';
+ns = bnet.node_sizes;
+
+sz = ns(query);
+T = 0*myones(sz);
+p = 0;
+for i=1:prod(ns)
+ inst = ind2subv(ns, i); % i'th instantiation
+ if isempty(vals) | inst(observed) == vals % agrees with evidence
+ prob = exp(log_lik_complete(bnet, num2cell(inst(:))));
+ p = p + prob;
+ v = inst(query);
+ j = subv2ind(sz, v);
+ T(j) = T(j) + prob;
+ end
+end
+
+[T, lik] = normalise(T);
+lik = p;
+loglik = log(lik);
+
+Tsmall = shrink_obs_dims_in_table(T, query, evidence);
+marginal.domain = query;
+marginal.T = Tsmall;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/gaussian_inf_engine.m/1.1.1.1/Fri May 14 01:13:26 2004//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@gaussian_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (gaussian_inf_engine)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if if X(i) is hidden, and otherwise contains its observed value (scalar or column vector)
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+O = find(~isemptycell(evidence));
+H = find(isemptycell(evidence));
+vals = cat(1, evidence{O});
+
+% Compute Pr(H|o)
+[Hmu, HSigma, loglik] = condition_gaussian(engine.mu, engine.Sigma, H, O, vals(:), ns);
+
+engine.Hmu = Hmu;
+engine.HSigma = HSigma;
+engine.hnodes = H;
+
+%%%%%%%%
+
+function [mu2, Sigma2, loglik] = condition_gaussian(mu, Sigma, X, Y, y, ns)
+% CONDITION_GAUSSIAN Compute Pr(X|Y=y) where X and Y are jointly Gaussian.
+% [mu2, Sigma2, ll] = condition_gaussian(mu, Sigma, X, Y, y, ns)
+
+if isempty(y)
+ mu2 = mu;
+ Sigma2 = Sigma;
+ loglik = 0;
+ return;
+end
+
+use_log = 1;
+
+if length(Y)==length(mu) % instantiating every variable
+ mu2 = y;
+ Sigma2 = zeros(length(y));
+ loglik = gaussian_prob(y, mu, Sigma, use_log);
+ return;
+end
+
+[muX, muY, SXX, SXY, SYX, SYY] = partition_matrix_vec(mu, Sigma, X, Y, ns);
+K = SXY*inv(SYY);
+mu2 = muX + K*(y-muY);
+Sigma2 = SXX - K*SYX;
+loglik = gaussian_prob(y, muY, SYY, use_log);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/gaussian_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/gaussian_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function engine = gaussian_inf_engine(bnet)
+% GAUSSIAN_INF_ENGINE Computes the joint multivariate Gaussian corresponding to the bnet
+% engine = gaussian_inf_engine(bnet)
+%
+% For details on how to compute the joint Gaussian from the bnet, see
+% - "Gaussian Influence Diagrams", R. Shachter and C. R. Kenley, Management Science, 35(5):527--550, 1989.
+% Once we have the Gaussian, we can apply the standard formulas for conditioning and marginalization.
+
+assert(isequal(bnet.cnodes, 1:length(bnet.dag)));
+
+[W, D, mu] = extract_params_from_gbn(bnet);
+U = inv(eye(size(W)) - W')';
+Sigma = U' * D * U;
+
+engine.mu = mu;
+engine.Sigma = Sigma;
+%engine.logp = log(normal_coef(Sigma));
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.Hmu = [];
+engine.HSigma = [];
+engine.hnodes = [];
+
+engine = class(engine, 'gaussian_inf_engine', inf_engine(bnet));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function marginal = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (gaussian)
+% marginal = marginal_nodes(engine, query)
+
+% Compute sum_{Hsum} Pr(Hkeep, Hsum | o)
+H = engine.hnodes;
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+Hkeep = myintersect(H, query);
+Hsum = mysetdiff(H, Hkeep);
+
+[marginal.mu, marginal.Sigma] = marginalize_gaussian(engine.Hmu, engine.HSigma, Hkeep, Hsum, ns);
+marginal.domain = query;
+marginal.T = 1;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/extract_params_from_gbn.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@gaussian_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/extract_params_from_gbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gaussian_inf_engine/private/extract_params_from_gbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function [B,D,mu] = extract_params_from_gbn(bnet)
+% Extract all the local parameters of each Gaussian node, and collect them into global matrices.
+% [B,D,mu] = extract_params_from_gbn(bnet)
+%
+% B(i,j) is a block matrix that contains the transposed weight matrix from node i to node j.
+% D(i,i) is a block matrix that contains the noise covariance matrix for node i.
+% mu(i) is a block vector that contains the shifted noise mean for node i.
+
+% In Shachter's model, the mean of each node in the global gaussian is
+% the same as the node's local unconditional mean.
+% In Alag's model (which we use), the global mean gets shifted.
+
+
+num_nodes = length(bnet.dag);
+bs = bnet.node_sizes(:); % bs = block sizes
+N = sum(bs); % num scalar nodes
+
+B = zeros(N,N);
+D = zeros(N,N);
+mu = zeros(N,1);
+
+for i=1:num_nodes % in topological order
+ ps = parents(bnet.dag, i);
+ e = bnet.equiv_class(i);
+ %[m, Sigma, weights] = extract_params_from_CPD(bnet.CPD{e});
+ s = struct(bnet.CPD{e}); % violate privacy of object
+ m = s.mean; Sigma = s.cov; weights = s.weights;
+ if length(ps) == 0
+ mu(block(i,bs)) = m;
+ else
+ mu(block(i,bs)) = m + weights * mu(block(ps,bs));
+ end
+ B(block(ps,bs), block(i,bs)) = weights';
+ D(block(i,bs), block(i,bs)) = Sigma;
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/gibbs_sampling_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@gibbs_sampling_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function [engine, loglik] = enter_evidence(engine, evidence)
+% ENTER_EVIDENCE Add the specified evidence to the network (gibbs_sampling_inf_engine)
+% [engine, loglik] = enter_evidence(engine, evidence)
+%
+% evidence{i} = [] if if X(i) is hidden, and otherwise contains its observed value
+%
+% loglik is not computed... we just return a 0 value
+
+bnet = bnet_from_engine(engine);
+
+engine.hnodes = find(isemptycell(evidence));
+engine.onodes = mysetdiff(1:length(evidence), engine.hnodes);
+
+engine.evidence = zeros(engine.slice_size, 1);
+
+% Reset all counts since they are no longer valid
+engine.marginal_counts = {};
+%engine.state = sample_bnet (bnet, 1, 0);
+engine.state = cell2num(sample_bnet(bnet));
+
+% For speed, we use a normal (not cell) array. We're making use of
+% the current restriction to discrete nodes.
+for i = engine.onodes
+ engine.evidence(i) = evidence{i};
+end
+
+loglik = 0;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/gibbs_sampling_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/gibbs_sampling_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+function engine = gibbs_sampling_inf_engine(bnet, varargin)
+% GIBBS_SAMPLING_INF_ENGINE
+%
+% engine = gibbs_sampling_inf_engine(bnet, ...)
+%
+% Optional parameters [default in brackets]
+% 'burnin' - How long before you start using the samples [100].
+% 'gap' - how often you use the samples in the estimate [1].
+% 'T' - number of samples [1000]
+% i.e, number of node flips (so, for
+% example if there are 10 nodes in the bnet, and T is 1000, each
+% node will get flipped 100 times (assuming a deterministic schedule))
+% The total running time is proportional to burnin + T*gap.
+%
+% 'order' - if the sampling schedule is deterministic, use this
+% parameter to specify the order in which nodes are sampled.
+% Order is allowed to include multiple copies of nodes, which is
+% useful if you want to, say, focus sampling on particular nodes.
+% Default is to use a deterministic schedule that goes through the
+% nodes in order.
+%
+% 'sampling_dist' - when using a stochastic sampling method, at
+% each step the node to sample is chosen according to this
+% distribution (may be unnormalized)
+%
+% The sampling_dist and order parameters shouldn't both be used,
+% and this will cause an assert.
+%
+%
+% Written by "Bhaskara Marthi" Feb 02.
+
+
+engine.burnin = 100;
+engine.gap = 1;
+engine.T = 1000;
+use_default_order = 1;
+engine.deterministic = 1;
+engine.order = {};
+engine.sampling_dist = {};
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ for i = 1:2:nargs
+ switch args{i}
+ case 'burnin'
+ engine.burnin = args{i+1};
+ case 'gap'
+ engine.gap = args{i+1};
+ case 'T'
+ engine.T = args{i+1};
+ case 'order'
+ assert (use_default_order);
+ use_default_order = 0;
+ engine.order = args{i+1};
+ case 'sampling_dist'
+ assert (use_default_order);
+ use_default_order = 0;
+ engine.deterministic = 0;
+ engine.sampling_dist = args{i+1};
+ otherwise
+ error(['unrecognized parameter to gibbs_sampling_inf_engine']);
+ end
+ end
+end
+
+engine.slice_size = size(bnet.dag, 2);
+if (use_default_order)
+ engine.order = 1:engine.slice_size;
+end
+engine.hnodes = [];
+engine.onodes = [];
+engine.evidence = [];
+engine.state = [];
+engine.marginal_counts = {};
+
+% Precompute the strides for each CPT
+engine.strides = compute_strides(bnet);
+
+% Precompute graphical information
+engine.families = compute_families(bnet);
+engine.children = compute_children(bnet);
+
+% For convenience, store the CPTs as tables rather than objects
+engine.CPT = get_cpts(bnet);
+
+engine = class(engine, 'gibbs_sampling_inf_engine', inf_engine(bnet));
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+function [marginal, engine] = marginal_nodes(engine, nodes, varargin);
+% MARGINAL_NODES Compute the marginal on the specified query nodes
+% (gibbs_sampling_engine)
+% [marginal, engine] = marginal_nodes(engine, nodes, ...)
+%
+% returns Pr(X(nodes) | X(observedNodes))
+%
+% The engine is also modified, and so it is returned as well, since
+% Matlab doesn't support passing by reference(!) So
+% if you want to, for example, incrementally run gibbs for a few 100
+% steps at a time, you should use the returned value.
+%
+% Optional arguments :
+%
+% 'reset_counts' is 1 if you want to reset the counts made in the
+% past, and 0 otherwise (if the current query nodes are different
+% from the previous query nodes, or if marginal_nodes has not been
+% called before, reset_counts should be set to 1).
+% By default it is 1.
+
+
+reset_counts = 1;
+
+if (nargin > 3)
+ args = varargin;
+ nargs = length(args);
+ for i = 1:2:nargs
+ switch args{i}
+ case 'reset_counts'
+ reset_counts = args{i+1};
+ otherwise
+ error(['Incorrect argument to gibbs_sampling_engine/' ...
+ ' marginal_nodes']);
+ end
+ end
+end
+
+% initialization stuff
+bnet = bnet_from_engine(engine);
+slice_size = engine.slice_size;
+hnodes = engine.hnodes;
+onodes = engine.onodes;
+nonqnodes = mysetdiff(1:slice_size, nodes);
+gap = engine.gap;
+burnin = engine.burnin;
+T_max = engine.T;
+ns = bnet.node_sizes(nodes);
+
+
+% Cache the strides for the marginal table
+marg_strides = [1 cumprod(ns(1:end-1))];
+
+% Reset counts if necessary
+if (reset_counts == 1)
+ %state = sample_bnet(bnet, 1, 0);
+ %state = cell2num(sample_bnet(bnet, 'evidence', num2cell(engine.evidence)));
+ state = cell2num(sample_bnet(bnet));
+ state(onodes) = engine.evidence(onodes);
+ if (length(ns) == 1)
+ marginal_counts = zeros(ns(1),1);
+ else
+ marginal_counts = zeros(ns);
+ end
+
+% Otherwise, use the counts that have been stored in the engine
+else
+ state = engine.state;
+ state(onodes, :) = engine.evidence(onodes, :);
+ marginal_counts = engine.marginal_counts;
+end
+
+if (engine.deterministic == 1)
+ pos = 1;
+ order = engine.order;
+ orderSize = length(engine.order);
+else
+ sampling_dist = normalise(engine.sampling_dist);
+end
+
+
+for t = 1:(T_max*gap+burnin)
+
+ % First, select node m to sample
+ if (engine.deterministic == 1)
+ m = engine.order(pos);
+ pos = pos+1;
+ if (pos > orderSize)
+ pos = 1;
+ end
+ else
+ m = my_sample_discrete(sampling_dist);
+ end
+
+
+ % If the node is observed, then don't bother resampling
+ if (myismember(m, onodes))
+ continue;
+ end
+
+ % Next, compute the posterior
+ post = compute_posterior (bnet, state, m, engine.strides, engine.families, ...
+ engine.children, engine.CPT);
+ state(m) = my_sample_discrete(post);
+
+ % Now update our monte carlo estimate of the posterior
+ % distribution on the query node
+ if ((mod(t-burnin, gap) == 0) & (t > burnin))
+
+ vals = state(nodes);
+ index = 1+marg_strides*(vals-1);
+ marginal_counts(index) = marginal_counts(index)+1;
+ end
+end
+
+% Store results for future computation. Note that we store
+% unnormalized counts
+engine.state = state;
+engine.marginal_counts = marginal_counts;
+
+marginal.T = normalise(marginal_counts);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CPT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CPT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function c = CPT(bnet, i)
+% CPT Helper function avoid having to type in
+% CPD_to_CPT(bnet.CPD{i}) every time
+
+c = CPD_to_CPT(bnet.CPD{i});
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+/CPT.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_children.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_families.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_families_dbn.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_posterior.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_posterior_dbn.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_strides.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/get_cpts.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/get_slice_dbn.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/get_slice_dbn.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/my_sample_discrete.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/sample_single_discrete.c/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@gibbs_sampling_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_children.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_children.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function c = compute_children(bnet)
+% COMPUTE_CHILDREN
+% precomputes the children of nodes in a bnet
+%
+% The return value is a cell array for now
+
+ss = size(bnet.dag, 1);
+c = cell(ss, 1);
+for i = 1:ss
+ c{i} = children(bnet.dag, i);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_families.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_families.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function families = compute_families(bnet)
+% COMPUTE_FAMILIES
+% precomputes the families of nodes in a bnet
+%
+% The return value is a cell array for now
+
+ss = size(bnet.dag, 1);
+families = cell(ss, 1);
+for i = 1:ss
+ families{i} = family(bnet.dag, i);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_families_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_families_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function families = compute_families_dbn(bnet)
+% COMPUTE_FAMILIES
+% precomputes the families of nodes in a dbn
+%
+% The return value is a cell array for now
+
+ss = size(bnet.intra, 1);
+families = cell(ss, 2);
+for i = 1:ss
+ families{i, 1} = family(bnet.dag, i, 1);
+ families{i, 2} = family(bnet.dag, i, 2);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,107 @@
+#include "mex.h"
+
+/* Helper function that extracts a one-dimensional slice from a cpt */
+/*
+void multiplySlice(mxArray *bnet, mxArray *state, int i, int nsi, int j,
+ mxArray *strides, mxArray *fam, mxArray *cpts,
+ double *y)
+*/
+void multiplySlice(const mxArray *bnet, const mxArray *state, int i, int nsi, int j,
+ const mxArray *strides, const mxArray *fam, const mxArray *cpts,
+ double *y)
+{
+ mxArray *ec, *cpt, *family;
+ double *ecElts, *cptElts, *famElts, *strideElts, *ev;
+ int c1, k, famSize, startInd, strideStride, pos, stride;
+
+ strideStride = mxGetM(strides);
+ strideElts = mxGetPr(strides);
+
+ ev = mxGetPr(state);
+
+ /* Get the CPT */
+ ec = mxGetField (bnet, 0, "equiv_class");
+ ecElts = mxGetPr(ec);
+ k = (int) ecElts[j-1];
+ cpt = mxGetCell (cpts, k-1);
+ cptElts = mxGetPr (cpt);
+
+ /* Get the family vector for this cpt */
+ family = mxGetCell (fam, j-1);
+ famSize = mxGetNumberOfElements (family);
+ famElts = mxGetPr (family);
+
+ /* Figure out starting position and stride */
+ startInd = 0;
+ for (c1 = 0, pos = k-1; c1 < famSize; c1++, pos +=strideStride) {
+ if (famElts[c1] != i) {
+ startInd += strideElts[pos]*(ev[(int)famElts[c1]-1]-1);
+ }
+ else {
+ stride = strideElts[pos];
+ }
+ }
+
+ for (c1 = 0, pos = startInd; c1 < nsi; c1++, pos+=stride) {
+ y[c1] *= cptElts[pos];
+ }
+}
+
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray
+ *prhs[])
+{
+ double *pi, *nsElts, *y, *childrenElts;
+ mxArray *ns, *children;
+ double sum;
+ int i, nsi, c1, numChildren;
+
+ pi = mxGetPr(prhs[2]);
+ i = (int) pi[0];
+
+ ns = mxGetField(prhs[0], 0, "node_sizes");
+ nsElts = mxGetPr(ns);
+ nsi = (int) nsElts[i-1];
+
+ /* Initialize the posterior */
+ plhs[0] = mxCreateDoubleMatrix (1, nsi, mxREAL);
+ y = mxGetPr(plhs[0]);
+ for (c1 = 0; c1 < nsi; c1++) {
+ y[c1] = 1;
+ }
+
+ /* Multiply in the cpt of the node i */
+ multiplySlice(prhs[0], prhs[1], i, nsi, i, prhs[3], prhs[4],
+ prhs[6], y);
+
+
+ /* Multiply in cpts of children of i */
+ children = mxGetCell (prhs[5], i-1);
+ numChildren = mxGetNumberOfElements (children);
+ childrenElts = mxGetPr (children);
+
+ for (c1 = 0; c1 < numChildren; c1++) {
+ int j;
+ j = (int) childrenElts[c1];
+ multiplySlice (prhs[0], prhs[1], i, nsi, j, prhs[3], prhs[4],
+ prhs[6], y);
+ }
+
+ sum = 0;
+ /* normalize! */
+ for (c1 = 0; c1 < nsi; c1++) {
+ sum += y[c1];
+ }
+
+ for (c1 = 0; c1 < nsi; c1++) {
+ y[c1] /= sum;
+ }
+}
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function post = compute_posterior_dbn(bnet, state, i, n, strides, families, ...
+ CPT)
+% COMPUTE_POSTERIOR
+%
+% post = compute_posterior(bnet, state, i, n, strides, families,
+% cpts)
+%
+% Compute the posterior distribution on node X_i^n of a DBN,
+% conditional on evidence in the cell array state
+%
+% strides is the cached result of compute_strides(bnet)
+% families is the cached result of compute_families(bnet)
+% cpt is the cached result of get_cpts(bnet)
+%
+% post is a one-dimensional table
+
+
+
+% First multiply in the cpt of the node itself
+post = get_slice_dbn(bnet, state, i, n, i, n, strides, families, CPT);
+post = post(:);
+
+% Then multiply in CPTs of children that are in this slice
+for j = children(bnet.intra, i)
+ slice = get_slice_dbn(bnet, state, j, n, i, n, strides, families, CPT);
+ post = post.*slice(:);
+end
+
+% Finally, if necessary, multiply in CPTs of children in the next
+% slice
+if (n < size(state,2))
+ for j = children(bnet.inter, i)
+ slice = get_slice_dbn(bnet, state, j, n+1, i, n, strides, families, ...
+ CPT);
+ post = post.*slice(:);
+ end
+end
+
+post = normalise(post);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_strides.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/compute_strides.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function strides = compute_strides(bnet)
+% COMPUTE_STRIDES For each CPT and each variable in that CPT,
+% returns the stride of that variable. So in future, we can
+% quickly extract a slice of the CPT.
+%
+% The return value is a 2d array, where strides(i,j) contains the
+% stride of the jth variable in the ith CPT. Cell arrays would
+% have saved space but they are slower.
+%
+
+num_cpts = size(bnet.CPD, 2);
+max_cpt_dim = 1 + max(sum(bnet.dag));
+strides = zeros(num_cpts, max_cpt_dim);
+
+for i = 1:num_cpts
+ c = CPT(bnet, i);
+ siz = size(CPT(bnet, i));
+
+ % Deal with the special case of a 1-d array separately
+ if siz(2) == 1
+ dim = 1;
+ else
+ dim = size(siz, 2);
+ end
+
+ strides(i, 1:dim ) = [1 cumprod(siz(1:dim-1))];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/get_cpts.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/get_cpts.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function c = get_cpts(bnet)
+% Get all the cpts in tabular form
+
+cpds = bnet.CPD;
+c = cell(size(cpds));
+for i = 1:length(c)
+ c{i} = CPT(bnet, i);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,116 @@
+#include "mex.h"
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray
+ *prhs[])
+{
+ double *pn, *pi, *pj, *pm, *y, *ecElts, *pcpt, *famElts, *strideElts,
+ *ev, *nsElts;
+ int i, k, j, m, n;
+ mxArray *ec, *cpt, *fam, *ns;
+ int c1, famSize, nsj;
+ int strideStride, startInd, stride, pos, numNodes;
+
+ const int BNET = 0;
+ const int STATE = 1;
+ const int STRIDES = 6;
+ const int FAMILIES = 7;
+ const int CPT = 8;
+
+ pn = mxGetPr(prhs[3]);
+ n = (int) pn[0];
+ pi = mxGetPr(prhs[2]);
+ i = (int) pi[0];
+ pj = mxGetPr(prhs[4]);
+ j = (int) pj[0];
+ pm = mxGetPr(prhs[5]);
+ m = (int) pm[0];
+ ev = mxGetPr(prhs[STATE]);
+ ns = mxGetField (prhs[BNET], 0, "node_sizes");
+ nsElts = mxGetPr (ns);
+ numNodes = mxGetM(ns);
+
+ strideStride = mxGetM(prhs[STRIDES]);
+ strideElts = mxGetPr(prhs[STRIDES]);
+
+
+
+ /* Treat the case n = 1 separately */
+ if (pn[0] == 1) {
+
+ /* Get the appropriate CPT */
+ ec = mxGetField (prhs[BNET], 0, "eclass1");
+ ecElts = mxGetPr(ec);
+ k = (int) ecElts[i-1];
+ cpt = mxGetCell (prhs[8], k-1);
+ pcpt = mxGetPr(cpt);
+
+ nsj = (int) nsElts[j-1];
+
+ /* Get the correct family vector */
+ /* (Note : MEX is painful) */
+ fam = mxGetCell (prhs[FAMILIES], i - 1);
+ famSize = mxGetNumberOfElements(fam);
+ famElts = mxGetPr(fam);
+
+
+ /* Figure out starting position and stride */
+ startInd = 0;
+ for (c1 = 0, pos = k-1; c1 < famSize; c1++, pos+=strideStride) {
+ if (famElts[c1] != j) {
+ startInd += strideElts[pos]*(ev[(int)famElts[c1]-1]-1);
+ }
+ else {
+ stride = strideElts[pos];
+ }
+ }
+
+ plhs[0] = mxCreateDoubleMatrix (1, nsj, mxREAL);
+ y = mxGetPr(plhs[0]);
+ for (c1 = 0, pos = startInd; c1 < nsj; c1++, pos+=stride) {
+ y[c1] = pcpt[pos];
+ }
+ }
+
+ /* Handle the case n > 1 */
+ else {
+
+ /* Get the appropriate CPT */
+ ec = mxGetField (prhs[BNET], 0, "eclass2");
+ ecElts = mxGetPr(ec);
+ k = (int) ecElts[i-1];
+ cpt = mxGetCell (prhs[8], k-1);
+ pcpt = mxGetPr(cpt);
+
+ /* Figure out size of slice */
+ if (m == 1) {
+ nsj = (int) nsElts[j-1];
+ }
+ else {
+ nsj = (int) nsElts[j-1+numNodes];
+ }
+
+ /* Figure out family */
+ fam = mxGetCell (prhs[FAMILIES], i - 1 + numNodes);
+ famSize = mxGetNumberOfElements(fam);
+ famElts = mxGetPr(fam);
+
+ startInd = 0;
+ for (c1 = 0, pos = k-1; c1 < famSize; c1++, pos+=strideStride) {
+ int f = (int) famElts[c1];
+
+ if (((f == j+numNodes) && (m == n)) || ((f == j) && (m ==
+ n-1))) {
+ stride = strideElts[pos];
+ }
+ else {
+ startInd += strideElts[pos] * (ev[f-1+((n-2)*numNodes)]-1);
+ }
+ }
+
+ plhs[0] = mxCreateDoubleMatrix(1,nsj, mxREAL);
+ y = mxGetPr(plhs[0]);
+ for (c1 = 0, pos = startInd; c1 < nsj; c1++, pos+=stride) {
+ y[c1] = pcpt[pos];
+ }
+ }
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,87 @@
+function slice = get_slice_dbn(bnet, state, i, n, j, m, strides, families, ...
+ CPT)
+% slice = get_slice_dbn(bnet, state, i, n, j, m, strides, families, cpt)
+%
+% GET_SLICE get one-dimensional slice of the CPT for node X_i^n
+% that corresponds to the different values of X_j^m, where all
+% other nodes have values given by state.
+% strides is the result of
+% calling compute_strides(bnet)
+% families is the result of calling compute_families(bnet)
+% cpts is the result of calling get_cpts(bnet)
+%
+% slice is a 1-d array
+
+
+if (n == 1)
+
+ k = bnet.eclass1(i);
+ c = CPT{k};
+
+ % Figure out evidence on family
+ fam = families{i, 1};
+ ev = state(fam, 1);
+
+ % Remove evidence on node j
+ pos = find(fam == j);
+ ev(pos) = 1;
+ dim = size(ev, 1);
+
+ % Compute initial index and stride
+ start_ind = 1+strides(k, 1:dim)*(ev-1);
+ stride = strides(k, pos);
+
+ % Compute the slice
+ slice = c(start_ind:stride:start_ind+(bnet.node_sizes(j, 1)-1)*stride);
+
+else
+
+ k = bnet.eclass2(i);
+ c = CPT{k};
+
+ fam = families{i, 2};
+ ss = length(bnet.intra);
+
+ % Divide the family into nodes in this time step and nodes in the
+ % previous time step
+ this_time_step = fam(find(fam > ss));
+ prev_time_step = fam(find(fam <= ss));
+
+ % Normalize the node numbers
+ this_time_step = this_time_step - ss;
+
+ % Get the evidence
+ this_step_ev = state(this_time_step, n);
+ prev_step_ev = state(prev_time_step, n-1);
+
+ % Remove the evidence for X_j^m
+ if (m == n)
+ pos = find(this_time_step == j);
+ this_step_ev(pos) = 1;
+ pos = pos + size(prev_time_step, 2);
+ else
+ assert (m == n-1);
+ pos = find(prev_time_step == j);
+ prev_step_ev(pos) = 1;
+ end
+
+ % Combine the two time steps
+ ev = [prev_step_ev; this_step_ev];
+ dim = size(ev, 1);
+
+
+ % Compute starting index and stride
+ start_ind = 1 + strides(k, 1:dim)*(ev-1);
+ stride = strides(k, pos);
+
+ % Compute slice
+ if (m == 1)
+ q = 1;
+ else
+ q = 2;
+ end
+ slice = c(start_ind:stride:start_ind+(bnet.node_sizes(j, q)-1)*stride);
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/my_sample_discrete.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/my_sample_discrete.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function M = my_sample_discrete(prob)
+% A faster version that calls a c subfunction. Will update one
+% day to have r and c parameters as well
+
+R = rand (1,1);
+M = sample_single_discrete(R, prob);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/sample_single_discrete.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@gibbs_sampling_inf_engine/private/sample_single_discrete.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+#include "mex.h"
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray
+ *prhs[])
+{
+ double *y, *pr, *dist;
+ int k, distSize;
+ double r, cumSum;
+
+ plhs[0] = mxCreateDoubleMatrix(1,1, mxREAL);
+ y = mxGetPr (plhs[0]);
+
+ pr = mxGetPr (prhs[0]);
+ r = pr[0];
+
+ dist = mxGetPr (prhs[1]);
+ distSize = mxGetNumberOfElements (prhs[1]);
+
+ for (k = 0, cumSum = 0; (k < distSize) && (r >= cumSum); cumSum += dist[k], k++);
+
+ y[0] = k;
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/enter_evidence.m/1.1.1.1/Mon Jun 7 19:05:42 2004//
+/find_mpe.m/1.1.1.1/Wed Jun 19 21:56:32 2002//
+/global_joint_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Mon Jun 7 19:04:48 2004//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@global_joint_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (global_joint)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value.
+%
+% Warning: Computing the log likelihood requires marginalizing all the nodes and can be slow.
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% exclude - list of nodes whose potential will not be included in the joint [ [] ]
+%
+% e.g., engine = enter_evidence(engine, ev, 'exclude', 3)
+
+exclude = [];
+maximize = 0;
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'exclude', exclude = args{i+1};
+ case 'maximize', maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+assert(~maximize)
+bnet = bnet_from_engine(engine);
+N = length(bnet.node_sizes);
+%[engine.jpot, loglik] = compute_joint_pot(bnet, mysetdiff(1:N, exclude), evidence, 1:N);
+[engine.jpot] = compute_joint_pot(bnet, mysetdiff(1:N, exclude), evidence, 1:N);
+% jpot should not be normalized, otherwise it gives wrong results for limids like asia_dt1
+if nargout == 2
+ [m] = marginal_nodes(engine, []);
+ [T, lik] = normalize(m.T);
+ loglik = log(lik);
+end
+%[engine.jpot loglik] = normalize_pot(engine.jpot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function [mpe, ll] = find_mpe(engine, evidence)
+% FIND_MPE_GLOBAL Compute the most probable explanation(s) from the global joint
+% [mpe, ll] = find_mpe(engine, evidence)
+%
+% mpe(k,i) is the most probable value of node i in the k'th global mode (cell array)
+%
+% We assume all nodes are discrete
+
+%engine = global_joint_inf_engine(bnet);
+bnet = bnet_from_engine(engine);
+engine = enter_evidence(engine, evidence);
+S1 = struct(engine); % violate object privacy
+S2 = struct(S1.jpot); % joint potential
+prob = max(S2.T(:));
+modes = find(S2.T(:) == prob);
+
+ens = bnet.node_sizes;
+onodes = find(~isemptycell(evidence));
+ens(onodes) = 1;
+mpe = ind2subv(ens, modes);
+for k=1:length(modes)
+ for i=onodes(:)'
+ mpe(k,i) = evidence{i};
+ end
+end
+ll = log(prob);
+
+mpe = num2cell(mpe);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/global_joint_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/global_joint_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function engine = global_joint_inf_engine(bnet)
+% GLOBAL_JOINT_INF_ENGINE Construct the global joint distribution as a potential
+% engine = global_joint_inf_engine(bnet)
+%
+% Warning: this has size exponential in the number of discrete hidden variables
+
+engine.jpot = [];
+engine = class(engine, 'global_joint_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function [m, pot] = marginal_family(engine, i)
+% MARGINAL_FAMILY Compute the marginal on i's family (global_inf_engine)
+% [m, pot] = marginal_family(engine, i)
+%
+
+bnet = bnet_from_engine(engine);
+[m, pot] = marginal_nodes(engine, family(bnet.dag, i));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@global_joint_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function [m, pot] = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified set of nodes (global_joint)
+% [m, pot] = marginal_nodes(engine, query)
+
+pot = marginalize_pot(engine.jpot, query);
+m = pot_to_marginal(pot);
+%[m.T, lik] = normalize(m.T);
+%loglik = log(lik);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+/cliques_from_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/clq_containing_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/collect_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/distribute_evidence.m/1.1.1.1/Mon Jun 17 21:00:08 2002//
+/enter_evidence.m/1.1.1.1/Mon Jun 17 20:59:30 2002//
+/enter_soft_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/find_max_config.m/1.1.1.1/Mon Jun 17 23:14:52 2002//
+/find_mpe.m/1.1.1.1/Mon Jun 17 23:14:08 2002//
+/init_pot.m/1.1.1.1/Sun Jun 16 19:34:56 2002//
+/jtree_inf_engine.m/1.1.1.1/Fri Oct 31 22:37:48 2003//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/collect_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/distribute_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/collect_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/collect_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function engine = collect_evidence(engine, root)
+
+if isempty(engine.postorder{root})
+ % this is the first time we have collected to this root
+ % memoize the order
+ [jtree, preorder, postorder] = mk_rooted_tree(engine.jtree, root);
+ postorder_parents = cell(1,length(postorder));
+ for n=postorder(1:end-1)
+ postorder_parents{n} = parents(jtree, n);
+ end
+ engine.postorder{root} = postorder;
+ engine.postorder_parents{root} = postorder_parents;
+else
+ postorder = engine.postorder{root};
+ postorder_parents = engine.postorder_parents{root};
+end
+
+C = length(engine.clpot);
+seppot = cell(C, C);
+% separators are implicitly initialized to 1s
+
+% collect to root (node to parents)
+for n=postorder(1:end-1)
+ for p=postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ engine.seppot{p,n} = marginalize_pot(engine.clpot{n}, engine.separator{p,n}, engine.maximize);
+ engine.clpot{p} = multiply_by_pot(engine.clpot{p}, engine.seppot{p,n});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/distribute_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/distribute_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function engine = distribute_evidence(engine, root)
+
+if isempty(engine.preorder{root})
+ % this is the first time we have distributed from this root
+ % memoize the order
+ [jtree, preorder, postorder] = mk_rooted_tree(engine.jtree, root);
+ preorder_children = cell(1,length(preorder));
+ for n=preorder
+ preorder_children{n} = children(jtree, n);
+ end
+ engine.preorder{root} = preorder;
+ engine.preorder_children{root} = preorder_children;
+else
+ preorder = engine.preorder{root};
+ preorder_children = engine.preorder_children{root};
+end
+
+
+% distribute from root (node to children)
+for n=preorder(:)'
+ for c=preorder_children{n}(:)'
+ engine.clpot{c} = divide_by_pot(engine.clpot{c}, engine.seppot{n,c});
+ engine.seppot{n,c} = marginalize_pot(engine.clpot{n}, engine.separator{n,c}, engine.maximize);
+ engine.clpot{c} = multiply_by_pot(engine.clpot{c}, engine.seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,107 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+% soft - a cell array of soft/virtual evidence;
+% soft{i} is a prob. distrib. over i's values, or [] [ cell(1,N) ]
+%
+% e.g., engine = enter_evidence(engine, ev, 'soft', soft_ev)
+%
+% For backwards compatibility with BNT2, you can also specify the parameters in the following order
+% engine = enter_evidence(engine, ev, soft_ev)
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+N = length(bnet.dag);
+
+% set default params
+exclude = [];
+soft_evidence = cell(1,N);
+maximize = 0;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ if iscell(args{1})
+ soft_evidence = args{1};
+ else
+ for i=1:2:nargs
+ switch args{i},
+ case 'soft', soft_evidence = args{i+1};
+ case 'maximize', maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ end
+end
+
+engine.maximize = maximize;
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+ if strcmp(pot_type, 'cg')
+ check_for_cd_arcs(onodes, bnet.cnodes, bnet.dag);
+end
+
+
+hard_nodes = 1:N;
+soft_nodes = find(~isemptycell(soft_evidence));
+S = length(soft_nodes);
+if S > 0
+ assert(pot_type == 'd');
+ assert(mysubset(soft_nodes, bnet.dnodes));
+end
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N+S);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+end
+
+for i=1:S
+ n = soft_nodes(i);
+ pot{N+i} = dpot(n, ns(n), soft_evidence{n});
+end
+
+%clqs = engine.clq_ass_to_node([hard_nodes soft_nodes]);
+%[clpot, loglik] = enter_soft_evidence(engine, clqs, pot, onodes, pot_type);
+%engine.clpot = clpot; % save the results for marginal_nodes
+
+
+clique = engine.clq_ass_to_node([hard_nodes soft_nodes]);
+potential = pot;
+
+
+% Set the clique potentials to all 1s
+C = length(engine.cliques);
+for i=1:C
+ engine.clpot{i} = mk_initial_pot(pot_type, engine.cliques{i}, ns, bnet.cnodes, onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clique)
+ c = clique(i);
+ engine.clpot{c} = multiply_by_pot(engine.clpot{c}, potential{i});
+end
+
+root = 1; % arbitrary
+engine = collect_evidence(engine, root);
+engine = distribute_evidence(engine, root);
+
+ll = zeros(1, C);
+for i=1:C
+ [engine.clpot{i}, ll(i)] = normalize_pot(engine.clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/Old/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function [clpot, loglik] = enter_soft_evidence(engine, clique, potential, onodes, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified potentials to the network (jtree)
+% [clpot, loglik] = enter_soft_evidence(engine, clique, potential, onodes, pot_type, maximize)
+%
+% We multiply potential{i} onto clique(i) before propagating.
+% We return all the modified clique potentials.
+
+[clpot, seppot] = init_pot(engine, clique, potential, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+[clpot, seppot] = distribute_evidence(engine, clpot, seppot);
+
+C = length(clpot);
+ll = zeros(1, C);
+for i=1:C
+ [clpot{i}, ll(i)] = normalize_pot(clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/cliques_from_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/cliques_from_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function cliques = cliques_from_engine(engine)
+% CLIQUES_FROM_ENGINE Return the cliques stored inside the inf. engine (jtree)
+% cliques = cliques_from_engine(engine)
+
+cliques = engine.cliques;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/clq_containing_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/clq_containing_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function c = clq_containing_nodes(engine, nodes, fam)
+% CLQ_CONTAINING_NODES Find the lightest clique (if any) that contains the set of nodes
+% c = clq_containing_nodes(engine, nodes, family)
+%
+% If the optional 'family' argument is specified, it means nodes = family(nodes(end)).
+% (This is useful since clq_ass_to_node is not accessible to outsiders.)
+% Returns c=-1 if there is no such clique.
+
+if nargin < 3, fam = 0; else fam = 1; end
+
+if length(nodes)==1
+ c = engine.clq_ass_to_node(nodes(1));
+%elseif fam
+% c = engine.clq_ass_to_node(nodes(end));
+else
+ B = engine.cliques_bitv;
+ w = engine.clique_weight;
+ clqs = find(all(B(:,nodes), 2)); % all selected columns must be 1
+ if isempty(clqs)
+ c = -1;
+ else
+ c = clqs(argmin(w(clqs)));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/collect_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/collect_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function [clpot, seppot] = collect_evidence(engine, clpot, seppot)
+% COLLECT_EVIDENCE Do message passing from leaves to root (children then parents)
+% [clpot, seppot] = collect_evidence(engine, clpot, seppot)
+
+for n=engine.postorder %postorder(1:end-1)
+ for p=engine.postorder_parents{n}
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, engine.separator{p,n}, engine.maximize);
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/distribute_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/distribute_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function [clpot, seppot] = distribute_evidence(engine, clpot, seppot)
+% DISTRIBUTE_EVIDENCE Do message passing from root to leaves (parents then children)
+% [clpot, seppot] = distribute_evidence(engine, clpot, seppot)
+
+for n=engine.preorder
+ for c=engine.preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, engine.separator{n,c}, engine.maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% soft - a cell array of soft/virtual evidence;
+% soft{i} is a prob. distrib. over i's values, or [] [ cell(1,N) ]
+%
+% e.g., engine = enter_evidence(engine, ev, 'soft', soft_ev)
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+N = length(bnet.dag);
+
+engine.evidence = evidence; % store this for marginal_nodes with add_ev option
+engine.maximize = 0;
+
+% set default params
+exclude = [];
+soft_evidence = cell(1,N);
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'soft', soft_evidence = args{i+1};
+ case 'maximize', engine.maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+ if strcmp(pot_type, 'cg')
+ check_for_cd_arcs(onodes, bnet.cnodes, bnet.dag);
+end
+
+if is_mnet(bnet)
+ pot = engine.user_pot;
+ clqs = engine.nums_ass_to_user_clqs;
+else
+ % Evaluate CPDs with evidence, and convert to potentials
+ pot = cell(1, N);
+ for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ if isempty(bnet.CPD{e})
+ error(['must define CPD ' num2str(e)])
+ else
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ end
+ end
+ clqs = engine.clq_ass_to_node(1:N);
+end
+
+% soft evidence
+soft_nodes = find(~isemptycell(soft_evidence));
+S = length(soft_nodes);
+if S > 0
+ assert(pot_type == 'd');
+ assert(mysubset(soft_nodes, bnet.dnodes));
+end
+for i=1:S
+ n = soft_nodes(i);
+ pot{end+1} = dpot(n, ns(n), soft_evidence{n});
+end
+clqs = [clqs engine.clq_ass_to_node(soft_nodes)];
+
+
+[clpot, seppot] = init_pot(engine, clqs, pot, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+[clpot, seppot] = distribute_evidence(engine, clpot, seppot);
+
+C = length(clpot);
+ll = zeros(1, C);
+for i=1:C
+ [clpot{i}, ll(i)] = normalize_pot(clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
+engine.clpot = clpot;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function [clpot, loglik] = enter_soft_evidence(engine, clique, potential, onodes, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified potentials to the network (jtree)
+% [clpot, loglik] = enter_soft_evidence(engine, clique, potential, onodes, pot_type, maximize)
+%
+% We multiply potential{i} onto clique(i) before propagating.
+% We return all the modified clique potentials.
+
+% only used by BK!
+
+[clpot, seppot] = init_pot(engine, clique, potential, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+[clpot, seppot] = distribute_evidence(engine, clpot, seppot);
+
+C = length(clpot);
+ll = zeros(1, C);
+for i=1:C
+ [clpot{i}, ll(i)] = normalize_pot(clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/find_max_config.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/find_max_config.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+function [mpe, clpot, seppot] = find_max_config(engine, clpot, seppot, evidence)
+% FIND_MAX_CONFIG Backwards pass of Viterbi for jtree
+% function [mpe, clpot, seppot] = find_max_config(engine, clpot, seppot, evidence)
+% See Cowell99 p98
+
+bnet = bnet_from_engine(engine);
+nnodes = length(bnet.dag);
+mpe = cell(1, nnodes);
+maximize = 1;
+
+c = engine.root_clq;
+pot = struct(clpot{c}); % violate object privacy
+dom = pot.domain;
+[indices, clpot{c}] = find_most_prob_entry(clpot{c});
+mpe(dom) = num2cell(indices);
+
+for n=engine.preorder
+ for c=engine.preorder_children{n}
+ clpot{c} = divide_by_pot(clpot{c}, seppot{n,c});
+ seppot{n,c} = marginalize_pot(clpot{n}, engine.separator{n,c}, maximize);
+ clpot{c} = multiply_by_pot(clpot{c}, seppot{n,c});
+
+ pot = struct(clpot{c}); % violate object privacy
+ dom = pot.domain;
+ [indices, clpot{c}] = find_most_prob_entry(clpot{c});
+ mpe(dom) = num2cell(indices);
+ end
+end
+
+obs_nodes = find(~isemptycell(evidence));
+% indices for observed nodes will be 1 - need to overwrite these
+mpe(obs_nodes) = evidence(obs_nodes);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+function mpe = find_mpe(engine, evidence, varargin)
+% FIND_MPE Find the most probable explanation of the data (assignment to the hidden nodes)
+% function mpe = find_mpe(engine, evidence,...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% soft - a cell array of soft/virtual evidence;
+% soft{i} is a prob. distrib. over i's values, or [] [ cell(1,N) ]
+%
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+N = length(bnet.dag);
+
+engine.evidence = evidence;
+
+% set default params
+exclude = [];
+soft_evidence = cell(1,N);
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'soft', soft_evidence = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+engine.maximize = 1;
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+ if strcmp(pot_type, 'cg')
+ check_for_cd_arcs(onodes, bnet.cnodes, bnet.dag);
+end
+
+hard_nodes = 1:N;
+soft_nodes = find(~isemptycell(soft_evidence));
+S = length(soft_nodes);
+if S > 0
+ assert(pot_type == 'd');
+ assert(mysubset(soft_nodes, bnet.dnodes));
+end
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N+S);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ if isempty(bnet.CPD{e})
+ error(['must define CPD ' num2str(e)])
+ else
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ end
+end
+
+for i=1:S
+ n = soft_nodes(i);
+ pot{N+i} = dpot(n, ns(n), soft_evidence{n});
+end
+clqs = engine.clq_ass_to_node([hard_nodes soft_nodes]);
+
+[clpot, seppot] = init_pot(engine, clqs, pot, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+mpe = find_max_config(engine, clpot, seppot, evidence); % instead of distribute evidence
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/init_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/init_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function [clpot, seppot] = init_pot(engine, clqs, pots, pot_type, onodes, ndx)
+% INIT_POT Initialise potentials with evidence (jtree_inf)
+% function [clpot, seppot] = init_pot(engine, clqs, pots, pot_type, onodes)
+
+cliques = engine.cliques;
+bnet = bnet_from_engine(engine);
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1,C);
+for i=1:C
+ clpot{i} = mk_initial_pot(pot_type, cliques{i}, bnet.node_sizes(:), bnet.cnodes(:), onodes);
+end
+
+% Multiply on specified potentials
+for i=1:length(clqs)
+ c = clqs(i);
+ clpot{c} = multiply_by_pot(clpot{c}, pots{i});
+end
+
+seppot = cell(C,C); % implicitly initialized to 1
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/jtree_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/jtree_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,141 @@
+function engine = jtree_inf_engine(bnet, varargin)
+% JTREE_INF_ENGINE Junction tree inference engine
+% engine = jtree_inf_engine(bnet, ...)
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% clusters - a cell array of sets of nodes we want to ensure are in the same clique (in addition to families) [ {} ]
+% root - the root of the junction tree will be a clique that contains this set of nodes [N]
+% stages - stages{t} is a set of nodes we want to eliminate before stages{t+1}, ... [ {1:N} ]
+%
+% e.g., engine = jtree_inf_engine(bnet, 'maximize', 1);
+%
+% For more details on the junction tree algorithm, see
+% - "Probabilistic networks and expert systems", Cowell, Dawid, Lauritzen and Spiegelhalter, Springer, 1999
+% - "Inference in Belief Networks: A procedural guide", C. Huang and A. Darwiche,
+% Intl. J. Approximate Reasoning, 15(3):225-263, 1996.
+
+
+% set default params
+N = length(bnet.dag);
+clusters = {};
+root = N;
+stages = { 1:N };
+maximize = 0;
+
+if nargin >= 2
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1})
+ error('the interface to jtree has changed; now, onodes is not allowed and all optional params must be passed by name')
+ end
+ for i=1:2:nargs
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ case 'root', root = args{i+1};
+ case 'stages', stages = args{i+1};
+ case 'maximize', maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+engine = init_fields;
+engine = class(engine, 'jtree_inf_engine', inf_engine(bnet));
+
+engine.maximize = maximize;
+
+onodes = bnet.observed;
+
+%[engine.jtree, dummy, engine.cliques, B, w, elim_order, moral_edges, fill_in_edges, strong] = ...
+% dag_to_jtree(bnet, onodes, stages, clusters);
+
+porder = determine_elim_constraints(bnet, onodes);
+strong = ~isempty(porder);
+ns = bnet.node_sizes(:);
+ns(onodes) = 1; % observed nodes have only 1 possible value
+[engine.jtree, root2, engine.cliques, B, w] = ...
+ graph_to_jtree(moralize(bnet.dag), ns, porder, stages, clusters);
+
+
+engine.cliques_bitv = B;
+engine.clique_weight = w;
+C = length(engine.cliques);
+engine.clpot = cell(1,C);
+
+% Compute the separators between connected cliques.
+[is,js] = find(engine.jtree > 0);
+engine.separator = cell(C,C);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ engine.separator{i,j} = find(B(i,:) & B(j,:)); % intersect(cliques{i}, cliques{j});
+end
+
+% A node can be a member of many cliques, but is assigned to exactly one, to avoid
+% double-counting its CPD. We assign node i to clique c if c is the "lightest" clique that
+% contains i's family, so it can accomodate its CPD.
+
+engine.clq_ass_to_node = zeros(1, N);
+for i=1:N
+ %c = clq_containing_nodes(engine, family(bnet.dag, i));
+ clqs_containing_family = find(all(B(:,family(bnet.dag, i)), 2)); % all selected columns must be 1
+ c = clqs_containing_family(argmin(w(clqs_containing_family)));
+ engine.clq_ass_to_node(i) = c;
+end
+
+% Make the jtree rooted, so there is a fixed message passing order.
+if strong
+ % the last clique is guaranteed to be a strong root
+ % engine.root_clq = length(engine.cliques);
+
+ % --- 4/17/2010, by Wei Sun (George Mason University):
+ % It has been proved that the last clique is not necessary to be the
+ % strong root, instead, a clique called interface clique, that contains
+ % all discrete parents and at least one continuous node from a connected
+ % continuous component in a CLG, is guaranteed to be a strong root.
+ engine.root_clq = findroot(bnet, engine.cliques) ;
+else
+ % jtree_dbn_inf_engine requires the root to contain the interface.
+ % This may conflict with the strong root requirement! *********** BUG *************
+ engine.root_clq = clq_containing_nodes(engine, root);
+ if engine.root_clq <= 0
+ error(['no clique contains ' num2str(root)]);
+ end
+end
+
+[engine.jtree, engine.preorder, engine.postorder] = mk_rooted_tree(engine.jtree, engine.root_clq);
+
+% collect
+engine.postorder_parents = cell(1,length(engine.postorder));
+for n=engine.postorder(:)'
+ engine.postorder_parents{n} = parents(engine.jtree, n);
+end
+% distribute
+engine.preorder_children = cell(1,length(engine.preorder));
+for n=engine.preorder(:)'
+ engine.preorder_children{n} = children(engine.jtree, n);
+end
+
+
+
+%%%%%%%%
+
+function engine = init_fields()
+
+engine.jtree = [];
+engine.cliques = [];
+engine.separator = [];
+engine.cliques_bitv = [];
+engine.clique_weight = [];
+engine.clpot = [];
+engine.clq_ass_to_node = [];
+engine.root_clq = [];
+engine.preorder = [];
+engine.postorder = [];
+engine.preorder_children = [];
+engine.postorder_parents = [];
+engine.maximize = [];
+engine.evidence = [];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_family(engine, i, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree)
+% marginal = marginal_family(engine, i)
+
+if nargin < 3, add_ev = 0; end
+assert(~add_ev);
+
+bnet = bnet_from_engine(engine);
+fam = family(bnet.dag, i);
+c = engine.clq_ass_to_node(i);
+marginal = pot_to_marginal(marginalize_pot(engine.clpot{c}, fam));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function marginal = marginal_nodes(engine, query, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (jtree)
+% marginal = marginal_nodes(engine, query, add_ev)
+%
+% 'query' must be a subset of some clique; an error will be raised if not.
+% add_ev is an optional argument; if 1, we will "inflate" the marginal of observed nodes
+% to their original size, adding 0s to the positions which contradict the evidence
+
+if nargin < 3, add_ev = 0; end
+
+c = clq_containing_nodes(engine, query);
+if c == -1
+ error(['no clique contains ' num2str(query)]);
+end
+marginal = pot_to_marginal(marginalize_pot(engine.clpot{c}, query, engine.maximize));
+
+if add_ev
+ bnet = bnet_from_engine(engine);
+ %marginal = add_ev_to_dmarginal(marginal, engine.evidence, bnet.node_sizes);
+ marginal = add_evidence_to_gmarginal(marginal, engine.evidence, bnet.node_sizes, bnet.cnodes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/set_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_inf_engine/set_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function engine = set_fields(engine, varargin)
+% SET_FIELDS Set the fields for a generic engine
+% engine = set_fields(engine, name/value pairs)
+%
+% e.g., engine = set_fields(engine, 'maximize', 1)
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'maximize', engine.maximize = args{i+1};
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_limid_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_limid_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes_SS.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_limid_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [m, pot] = marginal_family(engine, query)
+% MARGINAL_NODES Compute the marginal on the family of the specified node (jtree_limid)
+% [m, pot] = marginal_family(engine, query)
+%
+% query should be a single decision node, or [] (to compute global max expected utility)
+
+bnet = bnet_from_engine(engine);
+if isempty(query)
+ compute_meu = 1;
+ d = bnet.decision_nodes(1); % pick an arbitrary root to collect to
+ fam = []; % marginalize root pot down to a point
+else
+ compute_meu = 0;
+ d = query;
+ assert(myismember(d, bnet.decision_nodes));
+ fam = family(bnet.dag, d);
+end
+
+clpot = init_clpot(bnet, engine.cliques, engine.clq_ass_to_node, engine.evidence, engine.exclude);
+
+% collect to root (clique containing d)
+C = length(engine.cliques);
+seppot = cell(C, C); % separators are implicitely initialized to 1s
+for n=engine.postorder{d}(1:end-1)
+ for p=parents(engine.rooted_jtree{d}, n)
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, engine.separator{p,n});
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+root = engine.clq_ass_to_node(d);
+assert(root == engine.postorder{d}(end));
+pot = marginalize_pot(clpot{root}, fam);
+m = pot_to_marginal(pot);
+
+%%%%%%%%%%%
+
+
+function clpot = init_clpot(bnet, cliques, clq_ass_to_node, evidence, exclude)
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1, C);
+ns = bnet.node_sizes;
+for i=1:C
+ clpot{i} = upot(cliques{i}, ns(cliques{i}));
+end
+
+N = length(bnet.dag);
+nodes = mysetdiff(1:N, exclude);
+
+for n=nodes(:)'
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ c = clq_ass_to_node(n);
+ pot = convert_to_pot(bnet.CPD{e}, 'u', ns, fam, evidence);
+ clpot{c} = multiply_by_pot(clpot{c}, pot);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/marginal_nodes_SS.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/Old/marginal_nodes_SS.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [pot, MEU] = marginal_nodes(engine, d)
+
+C = length(cliques);
+%clpot = init_clpot(limid, cliques, d, clq_ass_to_node);
+clpot = init_clpot(limid, cliques, [], clq_ass_to_node);
+
+% collect to root
+if 1
+ % HUGIN
+ seppot = cell(C, C); % separators are implicitely initialized to 1s
+ for n=postorder{di}(1:end-1)
+ for p=parents(rooted_jtree{di}, n)
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, separator{p,n});
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+ end
+else
+ % Shafer-Shenoy
+ msg = cell(C,C);
+ for n=postorder{di}(1:end-1)
+ for c=children(rooted_jtree{di}, n)
+ clpot{n} = multiply_by_pot(clpot{n}, msg{c,n});
+ end
+ p = parents(rooted_jtree{di}, n);
+ %msg{n,p} = marginalize_pot(clpot{n}, cliques{p});
+ msg{n,p} = marginalize_pot(clpot{n}, separator{n,p});
+ end
+ root = clq_ass_to_node(d);
+ n=postorder{di}(end);
+ assert(n == root);
+ for c=children(rooted_jtree{di}, n)
+ clpot{n} = multiply_by_pot(clpot{n}, msg{c,n});
+ end
+end
+
+fam = family(limid.dag, d);
+pot = marginalize_pot(clpot{root}, fam);
+
+%%%%%%%
+jpot = compute_joint_pot_limid(limid);
+pot2 = marginalize_pot(jpot, fam);
+assert(approxeq_pot(pot, pot2))
+%%%%%%
+
+[policy, score] = extract_policy(pot);
+
+e = limid.equiv_class(d);
+limid.CPD{e} = set_params(limid.CPD{e}, 'policy', policy);
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function engine = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree_limid)
+% engine = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if if X(i) is hidden, and otherwise contains its observed value.
+%
+% The list below gives optional arguments [default value in brackets].
+%
+% exclude - list of nodes whose potential will not be included in the joint [ [] ]
+%
+% e.g., engine = enter_evidence(engine, ev, 'exclude', 3)
+
+exclude = [];
+
+if nargin >= 3
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'exclude', exclude = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+engine.exclude = exclude;
+engine.evidence = evidence;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/jtree_limid_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/jtree_limid_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function engine = jtree_limid_inf_engine(bnet)
+% JTREE_LIMID_INF_ENGINE Make a junction tree engine for use by solve_limid
+% engine = jtree_limid_inf_engine(bnet)
+%
+% This engine is designed to compute marginals on decision nodes
+
+
+MG = moralize(bnet.dag);
+% We do not remove the utility nodes, because that complicates the book-keeping.
+% Leaving them in will not introduce any un-necessary triangulation arcs, because they are always leaves.
+% Also, since utility nodes have size 1, they do not increase the size of the potentials.
+
+ns = bnet.node_sizes;
+elim_order = best_first_elim_order(MG, ns);
+[MTG, engine.cliques] = triangulate(MG, elim_order);
+[engine.jtree, root, B, w] = cliques_to_jtree(engine.cliques, ns);
+
+% A node can be a member of many cliques, but is assigned to exactly one, to avoid
+% double-counting its CPD. We assign node i to clique c if c is the "lightest" clique that
+% contains i's family, so it can accomodate its CPD.
+N = length(bnet.dag);
+engine.clq_ass_to_node = zeros(1, N);
+for i=1:N
+ clqs_containing_family = find(all(B(:,family(bnet.dag, i)), 2)); % all selected columns must be 1
+ c = clqs_containing_family(argmin(w(clqs_containing_family)));
+ engine.clq_ass_to_node(i) = c;
+end
+
+
+% Compute the separators between connected cliques.
+[is,js] = find(engine.jtree > 0);
+num_cliques = length(engine.cliques);
+engine.separator = cell(num_cliques, num_cliques);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ engine.separator{i,j} = find(B(i,:) & B(j,:)); % intersect(cliques{i}, cliques{j});
+end
+
+
+% create |D| different rooted jtree's
+engine.rooted_jtree = cell(1, N);
+engine.preorder = cell(1, N);
+engine.postorder = cell(1, N);
+for d=bnet.decision_nodes(:)'
+ root = engine.clq_ass_to_node(d);
+ [engine.rooted_jtree{d}, engine.preorder{d}, engine.postorder{d}] = mk_rooted_tree(engine.jtree, root);
+end
+
+engine.exclude = [];
+engine.evidence = [];
+
+engine = class(engine, 'jtree_limid_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [m, pot] = marginal_family(engine, query)
+% MARGINAL_NODES Compute the marginal on the family of the specified node (jtree_limid)
+% [m, pot] = marginal_family(engine, query)
+%
+% query should be a single decision node
+
+bnet = bnet_from_engine(engine);
+d = query;
+assert(myismember(d, bnet.decision_nodes));
+fam = family(bnet.dag, d);
+
+clpot = init_clpot(bnet, engine.cliques, engine.clq_ass_to_node, engine.evidence, engine.exclude);
+
+% collect to root (clique containing d)
+C = length(engine.cliques);
+seppot = cell(C, C); % separators are implicitely initialized to 1s
+for n=engine.postorder{d}(1:end-1)
+ for p=parents(engine.rooted_jtree{d}, n)
+ %clpot{p} = divide_by_pot(clpot{n}, seppot{p,n}); % dividing by 1 is redundant
+ seppot{p,n} = marginalize_pot(clpot{n}, engine.separator{p,n});
+ clpot{p} = multiply_by_pot(clpot{p}, seppot{p,n});
+ end
+end
+
+root = engine.clq_ass_to_node(d);
+assert(root == engine.postorder{d}(end));
+pot = marginalize_pot(clpot{root}, fam);
+m = pot_to_marginal(pot);
+
+%%%%%%%%%%%
+
+
+function clpot = init_clpot(bnet, cliques, clq_ass_to_node, evidence, exclude)
+
+% Set the clique potentials to all 1s
+C = length(cliques);
+clpot = cell(1, C);
+ns = bnet.node_sizes;
+for i=1:C
+ clpot{i} = upot(cliques{i}, ns(cliques{i}));
+end
+
+N = length(bnet.dag);
+nodes = mysetdiff(1:N, exclude);
+
+for n=nodes(:)'
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ c = clq_ass_to_node(n);
+ pot = convert_to_pot(bnet.CPD{e}, 'u', fam(:), evidence);
+ clpot{c} = multiply_by_pot(clpot{c}, pot);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_limid_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function [m, pot] = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified nodes (jtree_limid)
+% [m, pot] = marginal_nodes(engine, query)
+%
+% query should be a subset of a family of a decision node
+
+if isempty(query)
+ bnet = bnet_from_engine(engine);
+ d = bnet.decision_nodes(1); % pick an arbitrary decision node
+ [dummy, big_pot] = marginal_family(engine, d);
+else
+ [dummy, big_pot] = marginal_family(engine, query);
+end
+pot = marginalize_pot(big_pot, query);
+m = pot_to_marginal(pot);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/enter_evidence.m/1.1.1.1/Mon Jun 17 20:30:02 2002//
+/find_mpe.m/1.1.1.1/Mon Jun 17 20:29:40 2002//
+/jtree_mnet_inf_engine.m/1.1.1.1/Sat Jan 18 22:13:32 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_mnet_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,82 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (jtree)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% soft - a cell array of soft/virtual evidence;
+% soft{i} is a prob. distrib. over i's values, or [] [ cell(1,N) ]
+%
+% e.g., engine = enter_evidence(engine, ev, 'soft', soft_ev)
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+N = length(bnet.dag);
+
+engine.evidence = evidence; % store this for marginal_nodes with add_ev option
+engine.maximize = 0;
+
+% set default params
+exclude = [];
+soft_evidence = cell(1,N);
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'soft', soft_evidence = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+ if strcmp(pot_type, 'cg')
+ check_for_cd_arcs(onodes, bnet.cnodes, bnet.dag);
+end
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ if isempty(bnet.CPD{e})
+ error(['must define CPD ' num2str(e)])
+ else
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ end
+end
+clqs = engine.clq_ass_to_node(1:N);
+
+% soft evidence
+soft_nodes = find(~isemptycell(soft_evidence));
+S = length(soft_nodes);
+if S > 0
+ assert(pot_type == 'd');
+ assert(mysubset(soft_nodes, bnet.dnodes));
+end
+for i=1:S
+ n = soft_nodes(i);
+ pot{end+1} = dpot(n, ns(n), soft_evidence{n});
+end
+clqs = [clqs engine.clq_ass_to_node(soft_nodes)];
+
+
+[clpot, seppot] = init_pot(engine, clqs, pot, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+[clpot, seppot] = distribute_evidence(engine, clpot, seppot);
+
+C = length(clpot);
+ll = zeros(1, C);
+for i=1:C
+ [clpot{i}, ll(i)] = normalize_pot(clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
+engine.clpot = clpot;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+function mpe = find_mpe(engine, evidence, varargin)
+% FIND_MPE Find the most probable explanation of the data (assignment to the hidden nodes)
+% function mpe = find_mpe(engine, evidence,...)
+%
+% evidence{i} = [] if X(i) is hidden, and otherwise contains its observed value (scalar or column vector).
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% soft - a cell array of soft/virtual evidence;
+% soft{i} is a prob. distrib. over i's values, or [] [ cell(1,N) ]
+%
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+N = length(bnet.dag);
+
+engine.evidence = evidence;
+
+% set default params
+exclude = [];
+soft_evidence = cell(1,N);
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'soft', soft_evidence = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+end
+engine.maximize = 1;
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+ if strcmp(pot_type, 'cg')
+ check_for_cd_arcs(onodes, bnet.cnodes, bnet.dag);
+end
+
+hard_nodes = 1:N;
+soft_nodes = find(~isemptycell(soft_evidence));
+S = length(soft_nodes);
+if S > 0
+ assert(pot_type == 'd');
+ assert(mysubset(soft_nodes, bnet.dnodes));
+end
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N+S);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ if isempty(bnet.CPD{e})
+ error(['must define CPD ' num2str(e)])
+ else
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ end
+end
+
+for i=1:S
+ n = soft_nodes(i);
+ pot{N+i} = dpot(n, ns(n), soft_evidence{n});
+end
+clqs = engine.clq_ass_to_node([hard_nodes soft_nodes]);
+
+[clpot, seppot] = init_pot(engine, clqs, pot, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+mpe = find_max_config(engine, clpot, seppot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/jtree_mnet_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_mnet_inf_engine/jtree_mnet_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,101 @@
+function engine = jtree_mnet_inf_engine(model, varargin)
+% JTREE_MNET_INF_ENGINE Junction tree inference engine for Markov nets
+% engine = jtree_inf_engine(mnet, ...)
+%
+
+% set default params
+N = length(mnet.graph);
+root = N;
+
+engine = init_fields;
+engine = class(engine, 'jtree_mnet_inf_engine', inf_engine(bnet));
+
+onodes = bnet.observed;
+if is_mnet(bnet)
+ MG = bnet.graph;
+else
+ error('should be a mnet')
+end
+
+%[engine.jtree, dummy, engine.cliques, B, w, elim_order, moral_edges, fill_in_edges, strong] = ...
+% dag_to_jtree(bnet, onodes, stages, clusters);
+
+porder = determine_elim_constraints(bnet, onodes);
+strong = ~isempty(porder);
+ns = bnet.node_sizes(:);
+ns(onodes) = 1; % observed nodes have only 1 possible value
+[engine.jtree, root2, engine.cliques, B, w] = ...
+ graph_to_jtree(MG, ns, porder, stages, clusters);
+
+engine.cliques_bitv = B;
+engine.clique_weight = w;
+C = length(engine.cliques);
+engine.clpot = cell(1,C);
+
+% Compute the separators between connected cliques.
+[is,js] = find(engine.jtree > 0);
+engine.separator = cell(C,C);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ engine.separator{i,j} = find(B(i,:) & B(j,:)); % intersect(cliques{i}, cliques{j});
+end
+
+% A node can be a member of many cliques, but is assigned to exactly one, to avoid
+% double-counting its CPD. We assign node i to clique c if c is the "lightest" clique that
+% contains i's family, so it can accomodate its CPD.
+
+engine.clq_ass_to_node = zeros(1, N);
+for i=1:N
+ %c = clq_containing_nodes(engine, family(bnet.dag, i));
+ clqs_containing_family = find(all(B(:,family(bnet.dag, i)), 2)); % all selected columns must be 1
+ c = clqs_containing_family(argmin(w(clqs_containing_family)));
+ engine.clq_ass_to_node(i) = c;
+end
+
+% Make the jtree rooted, so there is a fixed message passing order.
+if strong
+ % the last clique is guaranteed to be a strong root
+ engine.root_clq = length(engine.cliques);
+else
+ % jtree_dbn_inf_engine requires the root to contain the interface.
+ % This may conflict with the strong root requirement! *********** BUG *************
+ engine.root_clq = clq_containing_nodes(engine, root);
+ if engine.root_clq <= 0
+ error(['no clique contains ' num2str(root)]);
+ end
+end
+
+[engine.jtree, engine.preorder, engine.postorder] = mk_rooted_tree(engine.jtree, engine.root_clq);
+
+% collect
+engine.postorder_parents = cell(1,length(engine.postorder));
+for n=engine.postorder(:)'
+ engine.postorder_parents{n} = parents(engine.jtree, n);
+end
+% distribute
+engine.preorder_children = cell(1,length(engine.preorder));
+for n=engine.preorder(:)'
+ engine.preorder_children{n} = children(engine.jtree, n);
+end
+
+
+
+%%%%%%%%
+
+function engine = init_fields()
+
+engine.jtree = [];
+engine.cliques = [];
+engine.separator = [];
+engine.cliques_bitv = [];
+engine.clique_weight = [];
+engine.clpot = [];
+engine.clq_ass_to_node = [];
+engine.root_clq = [];
+engine.preorder = [];
+engine.postorder = [];
+engine.preorder_children = [];
+engine.postorder_parents = [];
+engine.maximize = [];
+engine.evidence = [];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+/cliques_from_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/clq_containing_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/collect_evidence.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/distribute_evidence.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/enter_soft_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/init_pot.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/jtree_sparse_inf_engine.m/1.1.1.1/Sat Jan 18 22:11:32 2003//
+/marginal_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/set_fields.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_sparse_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/cliques_from_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/cliques_from_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function cliques = cliques_from_engine(engine)
+% CLIQUES_FROM_ENGINE Return the cliques stored inside the inf. engine (jtree)
+% cliques = cliques_from_engine(engine)
+
+cliques = engine.cliques;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/clq_containing_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/clq_containing_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function c = clq_containing_nodes(engine, nodes, fam)
+% CLQ_CONTAINING_NODES Find the lightest clique (if any) that contains the set of nodes
+% c = clq_containing_nodes(engine, nodes, family)
+%
+% If the optional 'family' argument is specified, it means nodes = family(nodes(end)).
+% (This is useful since clq_ass_to_node is not accessible to outsiders.)
+% Returns c=-1 if there is no such clique.
+
+if nargin < 3, fam = 0; else fam = 1; end
+
+if length(nodes)==1
+ c = engine.clq_ass_to_node(nodes(1));
+%elseif fam
+% c = engine.clq_ass_to_node(nodes(end));
+else
+ B = engine.cliques_bitv;
+ w = engine.clique_weight;
+ clqs = find(all(B(:,nodes), 2)); % all selected columns must be 1
+ if isempty(clqs)
+ c = -1;
+ else
+ c = clqs(argmin(w(clqs)));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/collect_evidence.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/collect_evidence.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,634 @@
+/* C mex for collect_evidence.c in @jtree_sparse_inf_engine directory */
+/* File enter_evidence.m in directory @jtree_sparse_inf_engine call it*/
+
+/******************************************/
+/* collect_evidence has 3 input & 2 output*/
+/* engine */
+/* clpot */
+/* seppot */
+/* */
+/* clpot */
+/* seppot */
+/******************************************/
+
+#include
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i 0
+ if iscell(args{1})
+ soft_evidence = args{1};
+ else
+ for i=1:2:nargs
+ switch args{i},
+ case 'soft', soft_evidence = args{i+1};
+ case 'maximize', maximize = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+ end
+end
+
+engine.maximize = maximize;
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+ if strcmp(pot_type, 'cg')
+ check_for_cd_arcs(onodes, bnet.cnodes, bnet.dag);
+end
+
+hard_nodes = 1:N;
+soft_nodes = find(~isemptycell(soft_evidence));
+S = length(soft_nodes);
+if S > 0
+ assert(pot_type == 'd');
+ assert(mysubset(soft_nodes, bnet.dnodes));
+end
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N+S);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ if isempty(bnet.CPD{e})
+ error(['must define CPD ' num2str(e)])
+ else
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ end
+end
+
+for i=1:S
+ n = soft_nodes(i);
+ pot{N+i} = dpot(n, ns(n), soft_evidence{n});
+end
+clqs = engine.clq_ass_to_node([hard_nodes soft_nodes]);
+
+[clpot, seppot] = init_pot(engine, clqs, pot, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+[clpot, seppot] = distribute_evidence(engine, clpot, seppot);
+C = length(clpot);
+ll = zeros(1, C);
+for i=1:C
+ domain = clpot{i}.domain;
+ sizes = clpot{i}.sizes;
+ T = clpot{i}.T;
+ clpot{i} = dpot(domain, sizes, T);
+end
+
+for i=1:C
+ [clpot{i}, ll(i)] = normalize_pot(clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
+engine.clpot = clpot;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/enter_soft_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/enter_soft_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function [clpot, loglik] = enter_soft_evidence(engine, clique, potential, onodes, pot_type)
+% ENTER_SOFT_EVIDENCE Add the specified potentials to the network (jtree)
+% [clpot, loglik] = enter_soft_evidence(engine, clique, potential, onodes, pot_type, maximize)
+%
+% We multiply potential{i} onto clique(i) before propagating.
+% We return all the modified clique potentials.
+
+[clpot, seppot] = init_pot(engine, clique, potential, pot_type, onodes);
+[clpot, seppot] = collect_evidence(engine, clpot, seppot);
+[clpot, seppot] = distribute_evidence(engine, clpot, seppot);
+
+C = length(clpot);
+ll = zeros(1, C);
+for i=1:C
+ [clpot{i}, ll(i)] = normalize_pot(clpot{i});
+end
+loglik = ll(1); % we can extract the likelihood from any clique
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/init_pot.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/init_pot.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,624 @@
+/* C mex init_pot for in @jtree_sparse_inf_engine directory */
+/* The file enter_evidence.m in directory @jtree_sparse_inf_engine call it*/
+
+/**************************************/
+/* init_pot.c has 5 input & 2 output */
+/* engine */
+/* clqs */
+/* pots */
+/* pot_type */
+/* onodes */
+/* */
+/* clpot */
+/* seppot */
+/**************************************/
+#include
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i= 2
+ args = varargin;
+ nargs = length(args);
+ if ~isstr(args{1})
+ error('the interface to jtree has changed; now, onodes is not allowed and all optional params must be passed by name')
+ end
+ for i=1:2:nargs
+ switch args{i},
+ case 'clusters', clusters = args{i+1};
+ case 'root', root = args{i+1};
+ case 'stages', stages = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+engine = init_fields;
+engine = class(engine, 'jtree_sparse_inf_engine', inf_engine(bnet));
+
+onodes = bnet.observed;
+%[engine.jtree, dummy, engine.cliques, B, w] = dag_to_jtree(bnet, onodes, stages, clusters);
+
+porder = determine_elim_constraints(bnet, onodes);
+strong = ~isempty(porder);
+ns = bnet.node_sizes(:);
+ns(onodes) = 1; % observed nodes have only 1 possible value
+[engine.jtree, root2, engine.cliques, B, w] = ...
+ graph_to_jtree(moralize(bnet.dag), ns, porder, stages, clusters);
+
+engine.cliques_bitv = B;
+engine.clique_weight = w;
+C = length(engine.cliques);
+engine.clpot = cell(1,C);
+
+% Compute the separators between connected cliques.
+[is,js] = find(engine.jtree > 0);
+engine.separator = cell(C,C);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ engine.separator{i,j} = find(B(i,:) & B(j,:)); % intersect(cliques{i}, cliques{j});
+end
+
+% A node can be a member of many cliques, but is assigned to exactly one, to avoid
+% double-counting its CPD. We assign node i to clique c if c is the "lightest" clique that
+% contains i's family, so it can accomodate its CPD.
+
+engine.clq_ass_to_node = zeros(1, N);
+for i=1:N
+ %c = clq_containing_nodes(engine, family(bnet.dag, i));
+ clqs_containing_family = find(all(B(:,family(bnet.dag, i)), 2)); % all selected columns must be 1
+ c = clqs_containing_family(argmin(w(clqs_containing_family)));
+ engine.clq_ass_to_node(i) = c;
+end
+
+% Make the jtree rooted, so there is a fixed message passing order.
+engine.root_clq = clq_containing_nodes(engine, root);
+if engine.root_clq <= 0
+ error(['no clique contains ' num2str(root)]);
+end
+
+[engine.jtree, engine.preorder, engine.postorder] = mk_rooted_tree(engine.jtree, engine.root_clq);
+
+% collect
+engine.postorder_parents = cell(1,length(engine.postorder));
+for n=engine.postorder(:)'
+ engine.postorder_parents{n} = parents(engine.jtree, n);
+end
+% distribute
+engine.preorder_children = cell(1,length(engine.preorder));
+for n=engine.preorder(:)'
+ engine.preorder_children{n} = children(engine.jtree, n);
+end
+
+ns = bnet.node_sizes;
+engine.actual_node_sizes = ns;
+
+
+%%%%%%%%
+
+function engine = init_fields()
+
+engine.jtree = [];
+engine.cliques = [];
+engine.separator = [];
+engine.cliques_bitv = [];
+engine.clique_weight = [];
+engine.clpot = [];
+engine.clq_ass_to_node = [];
+engine.root_clq = [];
+engine.preorder = [];
+engine.postorder = [];
+engine.preorder_children = [];
+engine.postorder_parents = [];
+engine.maximize = [];
+engine.evidence = [];
+engine.actual_node_sizes = [];
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function marginal = marginal_family(engine, i, add_ev)
+% MARGINAL_FAMILY Compute the marginal on the specified family (jtree)
+% marginal = marginal_family(engine, i)
+
+if nargin < 3, add_ev = 0; end
+assert(~add_ev);
+
+bnet = bnet_from_engine(engine);
+fam = family(bnet.dag, i);
+c = engine.clq_ass_to_node(i);
+marginal = pot_to_marginal(marginalize_pot(engine.clpot{c}, fam));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function marginal = marginal_nodes(engine, query, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (jtree)
+% marginal = marginal_nodes(engine, query, add_ev)
+%
+% 'query' must be a subset of some clique; an error will be raised if not.
+% add_ev is an optional argument; if 1, we will "inflate" the marginal of observed nodes
+% to their original size, adding 0s to the positions which contradict the evidence
+
+if nargin < 3, add_ev = 0; end
+
+c = clq_containing_nodes(engine, query);
+if c == -1
+ error(['no clique contains ' num2str(query)]);
+end
+marginal = pot_to_marginal(marginalize_pot(engine.clpot{c}, query, engine.maximize));
+
+if add_ev
+ bnet = bnet_from_engine(engine);
+ %marginal = add_ev_to_dmarginal(marginal, engine.evidence, bnet.node_sizes);
+ marginal = add_evidence_to_gmarginal(marginal, engine.evidence, bnet.node_sizes, bnet.cnodes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/collect_evidence.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/distribute_evidence.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/init_pot.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/init_pot1.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/init_pot1.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@jtree_sparse_inf_engine/old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/collect_evidence.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@jtree_sparse_inf_engine/old/collect_evidence.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,635 @@
+/* C mex for collect_evidence.c in @jtree_sparse_inf_engine directory */
+/* File enter_evidence.m in directory @jtree_sparse_inf_engine call it*/
+
+/******************************************/
+/* collect_evidence has 3 input & 2 output*/
+/* engine */
+/* clpot */
+/* seppot */
+/* */
+/* clpot */
+/* seppot */
+/******************************************/
+
+#include
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i= 2
+ args = varargin;
+ nargs = length(args);
+ for i=1:2:nargs
+ switch args{i},
+ case 'nsamples', nsamples= args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+engine.nsamples = nsamples;
+engine.samples = [];
+engine.weights = [];
+engine = class(engine, 'likelihood_weighting_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@likelihood_weighting_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@likelihood_weighting_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function marginal = marginal_nodes(engine, nodes)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (likelihood_weighting)
+% marginal = marginal_nodes(engine, nodes)
+
+bnet = bnet_from_engine(engine);
+ddom = myintersect(nodes, bnet.dnodes);
+cdom = myintersect(nodes, bnet.cnodes);
+nsamples = size(engine.samples, 1);
+ns = bnet.node_sizes;
+
+%w = normalise(engine.weights);
+w = engine.weights;
+if mysubset(nodes, ddom)
+ T = 0*myones(ns(nodes));
+ P = prod(ns(nodes));
+ indices = ind2subv(ns(nodes), 1:P);
+ samples = reshape(cat(1, engine.samples{:,nodes}), nsamples, length(nodes));
+ for j = 1:P
+ rows = find_rows(samples, indices(j,:));
+ T(j) = sum(w(rows));
+ end
+ T = normalise(T);
+ marginal.T = T;
+elseif subset(nodes, cdom)
+ samples = reshape(cat(1, engine.samples{:,nodes}), nsamples*sum(ns(nodes)), length(nodes));
+ [marginal.mu, marginal.Sigma] = wstats(samples', normalise(w));
+else
+ error('can''t handle mixed marginals yet');
+end
+
+marginal.domain = nodes;
+
+%%%%%%%%%
+
+function rows = find_rows(M, v)
+% FINDROWS Find rows which are equal to a specified vector
+% rows = findrows(M, v)
+% Each row of M is a sample
+
+temp = abs(M - repmat(v, size(M, 1), 1));
+rows = find(sum(temp,2) == 0);
+
+%%%%%%%%
+
+function [mu, Sigma] = wstats(X, w)
+
+% Computes the weighted mean and weighted covariance matrix for a given
+% set of observations X(:,i), and a set of normalised weights w(i).
+% Each column of X is a sample.
+
+d = X - repmat(X * w', 1, size(X, 2));
+mu = sum(X .* repmat(w, size(X, 1), 1), 2);
+Sigma = d * diag(w) * d';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/bethe_free_energy.m/1.1.1.1/Sun Jul 6 20:57:18 2003//
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/loopy_converged.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_family.m/1.1.1.1/Fri Oct 18 20:05:16 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/pearl_inf_engine.m/1.1.1.1/Sat Jan 11 18:53:28 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@pearl_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/bethe_free_energy.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/bethe_free_energy.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+function loglik = bethe_free_energy(engine, evidence)
+% BETHE_FREE_ENERGY Compute Bethe free energy approximation to the log likelihood
+% loglik = bethe_free_energy(engine, evidence)
+%
+% The Bethe free energy is given by an exact energy term and an approximate entropy term.
+% Energy
+% E = -sum_f sum_i b(f,i) ln theta(f,i)
+% where b(f,i) = approximate Pr(family f = i)
+% and theta(f,i) = Pr(f = i)
+% Entropy
+% S = H1 - H2
+% H1 = sum_f sum_p H(b(f))
+% where b(f) = belief on family f, H(.) = entropy
+% H2 = sum_n (q(n)-1) H(b(n))
+% where q(n) = num. neighbors of n
+%
+% This function was written by Yair Weiss, 8/22/01.
+
+hidden = find(isemptycell(evidence));
+bnet = bnet_from_engine(engine);
+N = length(bnet.dag);
+
+add_ev = 1;
+E=0;H1=0;H2=0;
+loglik=0;
+for n=1:N
+ ps=parents(bnet.dag,n);
+ if (length(ps)==0) % root node
+ qi=length(children(bnet.dag,n))-1;
+ else
+ qi=length(children(bnet.dag,n));
+ end
+ bf = marginal_family(engine, n, add_ev);
+ bf = bf.T(:);
+ e = bnet.equiv_class(n);
+ T = CPD_to_CPT(bnet.CPD{e});
+ T = T(:);
+ E = E-sum(log(T+(T==0)).*bf);
+
+ if length(ps) > 0
+ % root nodes don't count as fmailies
+ H1 = H1+sum(log(bf+(bf==0)).*bf);
+ end
+
+ bi = marginal_nodes(engine, n, add_ev);
+ bi = bi.T(:);
+ H2 = H2+qi*sum(log(bi+(bi==0)).*bi);
+end
+loglik=E+H1-H2;
+loglik=-loglik;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,153 @@
+function [engine, loglik, niter] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (pearl)
+% [engine, loglik, num_iter] = enter_evidence(engine, evidence, ...)
+% evidence{i} = [] if if X(i) is hidden, and otherwise contains its observed value (scalar or column vector)
+%
+% The following optional arguments can be specified in the form of name/value pa irs:
+% [default value in brackets]
+%
+% maximize - if 1, does max-product instead of sum-product [0]
+% 'filename' - msgs will be printed to this file, so you can assess convergence while it runs [engine.filename]
+%
+% e.g., engine = enter_evidence(engine, ev, 'maximize', 1)
+%
+% For discrete nodes, loglik is the negative Bethe free energy evaluated at the final beliefs.
+% For Gaussian nodes, loglik is currently always 0.
+%
+% 'num_iter' returns the number of iterations used.
+
+maximize = 0;
+filename = engine.filename;
+
+% parse optional params
+args = varargin;
+nargs = length(args);
+if nargs > 0
+ for i=1:2:nargs
+ switch args{i},
+ case 'maximize', maximize = args{i+1};
+ case 'filename', filename = args{i+1};
+ otherwise,
+ error(['invalid argument name ' args{i}]);
+ end
+ end
+end
+
+
+if maximize
+ error('can''t handle max-prop yet')
+end
+
+engine.maximize = maximize;
+engine.filename = filename;
+engine.bel = []; % reset if necessary
+
+bnet = bnet_from_engine(engine);
+N = length(bnet.dag);
+ns = bnet.node_sizes(:);
+
+observed_bitv = ~isemptycell(evidence);
+disconnected = find(engine.disconnected_nodes_bitv);
+if ~all(observed_bitv(disconnected))
+ error(['The following discrete nodes must be observed: ' num2str(disconnected)])
+end
+msg = init_pearl_msgs(engine.msg_type, engine.msg_dag, ns, evidence);
+
+niter = 1;
+switch engine.protocol
+ case 'parallel', [msg, niter] = parallel_protocol(engine, evidence, msg);
+ case 'tree', msg = tree_protocol(engine, evidence, msg);
+ otherwise,
+ error(['unrecognized protocol ' engine.protocol])
+end
+engine.niter = niter;
+
+engine.marginal = cell(1,N);
+nodes = find(~engine.disconnected_nodes_bitv);
+for n=nodes(:)'
+ engine.marginal{n} = compute_bel(engine.msg_type, msg{n}.pi, msg{n}.lambda);
+end
+
+engine.evidence = evidence; % needed by marginal_nodes and marginal_family
+engine.msg = msg; % needed by marginal_family
+
+if (nargout >= 2)
+ if (engine.msg_type == 'd')
+ loglik = bethe_free_energy(engine, evidence);
+ else
+ loglik = 0;
+ end
+end
+
+
+
+%%%%%%%%%%%
+
+function msg = init_pearl_msgs(msg_type, dag, ns, evidence)
+% INIT_MSGS Initialize the lambda/pi message and state vectors
+% msg = init_msgs(dag, ns, evidence)
+%
+
+N = length(dag);
+msg = cell(1,N);
+observed = ~isemptycell(evidence);
+lam_msg = 1;
+
+for n=1:N
+ ps = parents(dag, n);
+ msg{n}.pi_from_parent = cell(1, length(ps));
+ for i=1:length(ps)
+ p = ps(i);
+ msg{n}.pi_from_parent{i} = mk_msg(msg_type, ns(p));
+ end
+
+ cs = children(dag, n);
+ msg{n}.lambda_from_child = cell(1, length(cs));
+ for i=1:length(cs)
+ c = cs(i);
+ msg{n}.lambda_from_child{i} = mk_msg(msg_type, ns(n), lam_msg);
+ end
+
+ msg{n}.lambda = mk_msg(msg_type, ns(n), lam_msg);
+ msg{n}.pi = mk_msg(msg_type, ns(n));
+
+ if observed(n)
+ msg{n}.lambda_from_self = mk_msg_with_evidence(msg_type, ns(n), evidence{n});
+ else
+ msg{n}.lambda_from_self = mk_msg(msg_type, ns(n), lam_msg);
+ end
+end
+
+
+
+%%%%%%%%%
+
+function msg = mk_msg(msg_type, sz, is_lambda_msg)
+
+if nargin < 3, is_lambda_msg = 0; end
+
+switch msg_type
+ case 'd', msg = ones(sz, 1);
+ case 'g',
+ if is_lambda_msg
+ msg.precision = zeros(sz, sz);
+ msg.info_state = zeros(sz, 1);
+ else
+ msg.Sigma = zeros(sz, sz);
+ msg.mu = zeros(sz,1);
+ end
+end
+
+%%%%%%%%%%%%
+
+function msg = mk_msg_with_evidence(msg_type, sz, val)
+
+switch msg_type
+ case 'd',
+ msg = zeros(sz, 1);
+ msg(val) = 1;
+ case 'g',
+ %msg.observed_val = val(:);
+ msg.precision = inf;
+ msg.mu = val(:);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/loopy_converged.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/loopy_converged.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function niter = loopy_converged(engine)
+% LOOPY_CONVERGED Did loopy belief propagation converge? 0 means no, eles we return the num. iterations.
+% function niter = loopy_converged(engine)
+%
+% We use a simple heuristic: we say convergence occurred if the number of iterations
+% used was less than the maximum allowed.
+
+if engine.niter == engine.max_iter
+ niter = 0;
+else
+ niter = engine.niter;
+end
+%conv = (strcmp(engine.protocol, 'tree') | (engine.niter < engine.max_iter));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/marginal_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/marginal_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+function m = marginal_family(engine, n, add_ev)
+% MARGINAL_FAMILY Compute the marginal on i's family (loopy)
+% m = marginal_family(engine, n, add_ev)
+
+if nargin < 3, add_ev = 0; end
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+ps = parents(bnet.dag, n);
+dom = [ps n];
+CPD = bnet.CPD{bnet.equiv_class(n)};
+
+switch engine.msg_type
+ case 'd',
+ % The method is similar to the following HMM equation:
+ % xi(i,j,t) = normalise( alpha(i,t) * transmat(i,j) * obsmat(j,t+1) * beta(j,t+1) )
+ % where xi(i,j,t) = Pr(Q(t)=i, Q(t+1)=j | y(1:T))
+ % beta == lambda, alpha == pi, alpha from each parent = pi msg
+ % In general, if A,B are parents of C,
+ % P(A,B,C) = P(C|A,B) pi_msg(A->C) pi_msg(B->C) lambda(C)
+ % where lambda(C) = P(ev below and including C|C) = prod incoming lamba_msg(children->C)
+ % and pi_msg(X->C) = P(X|ev above) etc
+
+ T = dpot(dom, ns(dom), CPD_to_CPT(CPD));
+ for j=1:length(ps)
+ p = ps(j);
+ pi_msg = dpot(p, ns(p), engine.msg{n}.pi_from_parent{j});
+ T = multiply_by_pot(T, pi_msg);
+ end
+ lambda = dpot(n, ns(n), engine.msg{n}.lambda);
+ T = multiply_by_pot(T, lambda);
+ T = normalize_pot(T);
+ m = pot_to_marginal(T);
+ if ~add_ev
+ m.T = shrink_obs_dims_in_table(m.T, dom, engine.evidence);
+ end
+ case 'g',
+ if engine.disconnected_nodes_bitv(n)
+ m.T = 1;
+ m.domain = dom;
+ if add_ev
+ m = add_ev_to_dmarginal(m, engine.evidence, ns)
+ end
+ return;
+ end
+
+ [m, C, W] = gaussian_CPD_params_given_dps(CPD, dom, engine.evidence);
+ cdom = myintersect(dom, bnet.cnodes);
+ pot = linear_gaussian_to_cpot(m, C, W, dom, ns, cdom, engine.evidence);
+ % linear_gaussian_to_cpot will set the effective size of observed nodes to 0,
+ % so we need to do this explicitely for the messages, too,
+ % so they are all the same size.
+ obs_bitv = ~isemptycell(engine.evidence);
+ ps = parents(engine.msg_dag, n);
+ for j=1:length(ps)
+ p = ps(j);
+ msg = engine.msg{n}.pi_from_parent{j};
+ if obs_bitv(p)
+ pi_msg = mpot(p, 0);
+ else
+ pi_msg = mpot(p, ns(p), 0, msg.mu, msg.Sigma);
+ end
+ pot = multiply_by_pot(pot, mpot_to_cpot(pi_msg));
+ end
+ msg = engine.msg{n}.lambda;
+ if obs_bitv(n)
+ lambda = cpot(n, 0);
+ else
+ lambda = cpot(n, ns(n), 0, msg.info_state, msg.precision);
+ end
+ pot = multiply_by_pot(pot, lambda);
+ m = pot_to_marginal(pot);
+ if add_ev
+ m = add_evidence_to_gmarginal(m, engine.evidence, bnet.node_sizes, bnet.cnodes);
+ end
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function marginal = marginal_nodes(engine, query, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (loopy)
+% marginal = marginal_nodes(engine, query, add_ev)
+%
+% 'query' must be a single node.
+% add_ev is an optional argument; if 1, observed nodes will be set to their original size,
+% otherwise they will be treated like points.
+
+if nargin < 3, add_ev = 0; end
+
+if length(query) > 1
+ error('can only compute marginal on single nodes or families')
+end
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+
+switch engine.msg_type
+ case 'd',
+ T = engine.marginal{query};
+ if ~add_ev
+ marginal.T = shrink_obs_dims_in_table(T, query, engine.evidence);
+ else
+ marginal.T = T;
+ end
+ marginal.domain = query;
+
+ case 'g',
+ if engine.disconnected_nodes_bitv(query)
+ marginal.T = 1;
+ marginal.domain = query;
+ if add_ev
+ marginal = add_ev_to_dmarginal(marginal, engine.evidence, ns)
+ end
+ return;
+ end
+
+ marginal = engine.marginal{query};
+ marginal.domain = query;
+ if ~add_ev
+ marginal = shrink_obs_dims_in_gaussian(marginal, query, engine.evidence, ns);
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/pearl_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/pearl_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,158 @@
+function engine = pearl_inf_engine(bnet, varargin)
+% PEARL_INF_ENGINE Pearl's algorithm (belief propagation)
+% engine = pearl_inf_engine(bnet, ...)
+%
+% If the graph has no loops (undirected cycles), you should use the tree protocol,
+% and the results will be exact.
+% Otherwise, you should use the parallel protocol, and the results may be approximate.
+%
+% Optional arguments [default in brackets]
+% 'protocol' - tree or parallel ['parallel']
+%
+% Optional arguments for the loopy case
+% 'max_iter' - specifies the max num. iterations to perform [2*num nodes]
+% 'tol' - convergence criterion on messages [1e-3]
+% 'momentum' - msg = (m*old + (1-m)*new). [m=0]
+% 'filename' - msgs will be printed to this file, so you can assess convergence while it runs [[]]
+% 'storebel' - 1 means save engine.bel{n,t} for every iteration t and hidden node n [0]
+%
+% If there are discrete and cts nodes, we assume all the discretes are observed. In this
+% case, you must use the parallel protocol, and the evidence pattern must be fixed.
+
+
+N = length(bnet.dag);
+protocol = 'parallel';
+max_iter = 2*N;
+% We use N+2 for the following reason:
+% In N iterations, we get the exact answer for a tree.
+% In the N+1st iteration, we notice that the results are the same as before, and terminate.
+% In loopy_converged, we see that N+1 < max = N+2, and declare convergence.
+tol = 1e-3;
+momentum = 0;
+filename = [];
+storebel = 0;
+
+args = varargin;
+for i=1:2:length(args)
+ switch args{i},
+ case 'protocol', protocol = args{i+1};
+ case 'max_iter', max_iter = args{i+1};
+ case 'tol', tol = args{i+1};
+ case 'momentum', momentum = args{i+1};
+ case 'filename', filename = args{i+1};
+ case 'storebel', storebel = args{i+1};
+ end
+end
+
+engine.filename = filename;
+engine.storebel = storebel;
+engine.bel = [];
+
+if strcmp(protocol, 'tree')
+ % We first send messages up to the root (pivot node), and then back towards the leaves.
+ % If the bnet is a singly connected graph (no loops), choosing a root induces a directed tree.
+ % Peot and Shachter discuss ways to pick the root so as to minimize the work,
+ % taking into account which nodes have changed.
+ % For simplicity, we always pick the root to be the last node in the graph.
+ % This means the first pass is equivalent to going forward in time in a DBN.
+
+ engine.root = N;
+ [engine.adj_mat, engine.preorder, engine.postorder, loopy] = ...
+ mk_rooted_tree(bnet.dag, engine.root);
+ % engine.adj_mat might have different edge orientations from bnet.dag
+ if loopy
+ error('can only apply tree protocol to loop-less graphs')
+ end
+else
+ engine.root = [];
+ engine.adj_mat = [];
+ engine.preorder = [];
+ engine.postorder = [];
+end
+
+engine.niter = [];
+engine.protocol = protocol;
+engine.max_iter = max_iter;
+engine.tol = tol;
+engine.momentum = momentum;
+engine.maximize = [];
+
+%onodes = find(~isemptycell(evidence));
+onodes = bnet.observed;
+engine.msg_type = determine_pot_type(bnet, onodes, 1:N); % needed also by marginal_nodes
+if strcmp(engine.msg_type, 'cg')
+ error('messages must be discrete or Gaussian')
+end
+[engine.msg_dag, disconnected_nodes] = mk_msg_dag(bnet, engine.msg_type, onodes);
+engine.disconnected_nodes_bitv = zeros(1,N);
+engine.disconnected_nodes_bitv(disconnected_nodes) = 1;
+
+
+% this is where we store stuff between enter_evidence and marginal_nodes
+engine.marginal = cell(1,N);
+engine.evidence = [];
+engine.msg = [];
+
+[engine.parent_index, engine.child_index] = mk_loopy_msg_indices(engine.msg_dag);
+
+engine = class(engine, 'pearl_inf_engine', inf_engine(bnet));
+
+
+%%%%%%%%%
+
+function [dag, disconnected_nodes] = mk_msg_dag(bnet, msg_type, onodes)
+
+% If we are using Gaussian msgs, all discrete nodes must be observed;
+% they are then disconnected from the graph, so we don't try to send
+% msgs to/from them: their observed value simply serves to index into
+% the right set of parameters for the Gaussian nodes (which use CPD.ps
+% instead of parents(dag), and hence are unaffected by this "surgery").
+
+disconnected_nodes = [];
+switch msg_type
+ case 'd', dag = bnet.dag;
+ case 'g',
+ disconnected_nodes = bnet.dnodes;
+ dag = bnet.dag;
+ for i=disconnected_nodes(:)'
+ ps = parents(bnet.dag, i);
+ cs = children(bnet.dag, i);
+ if ~isempty(ps), dag(ps, i) = 0; end
+ if ~isempty(cs), dag(i, cs) = 0; end
+ end
+end
+
+
+%%%%%%%%%%
+function [parent_index, child_index] = mk_loopy_msg_indices(dag)
+% MK_LOOPY_MSG_INDICES Compute "port numbers" for message passing
+% [parent_index, child_index] = mk_loopy_msg_indices(bnet)
+%
+% child_index{n}(c) = i means c is n's i'th child, i.e., i = find_equiv_posns(c, children(n))
+% child_index{n}(c) = 0 means c is not a child of n.
+% parent_index{n}{p} is defined similarly.
+% We need to use these indices since the pi_from_parent/ lambda_from_child cell arrays
+% cannot be sparse, and hence cannot be indexed by the actual number of the node.
+% Instead, we use the number of the "port" on which the message arrived.
+
+N = length(dag);
+child_index = cell(1,N);
+parent_index = cell(1,N);
+for n=1:N
+ cs = children(dag, n);
+ child_index{n} = sparse(1,N);
+ for i=1:length(cs)
+ c = cs(i);
+ child_index{n}(c) = i;
+ end
+ ps = parents(dag, n);
+ parent_index{n} = sparse(1,N);
+ for i=1:length(ps)
+ p = ps(i);
+ parent_index{n}(p) = i;
+ end
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/compute_bel.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/parallel_protocol.m/1.1.1.1/Sun Aug 21 20:00:12 2005//
+/prod_lambda_msgs.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/tree_protocol.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@pearl_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/compute_bel.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/compute_bel.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function bel = compute_bel(msg_type, pi, lambda)
+
+switch msg_type,
+ case 'd', bel = normalise(pi .* lambda);
+ case 'g',
+ if isinf(lambda.precision) % ignore pi because lambda is completely certain (observed)
+ bel.mu = lambda.mu;
+ bel.Sigma = zeros(length(bel.mu)); % infinite precision => 0 variance
+ elseif all(pi.Sigma==0) % ignore lambda because pi is completely certain (delta fn prior)
+ bel.Sigma = pi.Sigma;
+ bel.mu = pi.mu;
+ elseif all(isinf(pi.Sigma)) % ignore pi because pi is completely uncertain
+ bel.Sigma = inv(lambda.precision);
+ bel.mu = bel.Sigma * lambda.info_state;
+ elseif all(lambda.precision == 0) % ignore lambda because lambda is completely uncertain
+ bel.Sigma = pi.Sigma;
+ bel.mu = pi.mu;
+ else % combine both pi and lambda
+ pi_precision = inv(pi.Sigma);
+ bel.Sigma = inv(pi_precision + lambda.precision);
+ bel.mu = bel.Sigma*(pi_precision * pi.mu + lambda.info_state);
+ end
+ otherwise, error(['unrecognized msg type ' msg_type])
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/parallel_protocol.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/parallel_protocol.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,114 @@
+function [msg, niter] = parallel_protocol(engine, evidence, msg)
+
+bnet = bnet_from_engine(engine);
+N = length(bnet.dag);
+ns = bnet.node_sizes(:);
+
+if ~isempty(engine.filename)
+ fid = fopen(engine.filename, 'w');
+ if fid == 0
+ error(['could not open ' engine.filename ' for writing'])
+ end
+else
+ fid = [];
+end
+
+converged = 0;
+iter = 1;
+hidden = find(isemptycell(evidence));
+bel = cell(1,N);
+old_bel = cell(1,N);
+%nodes = mysetdiff(1:N, engine.disconnected_nodes);
+nodes = find(~engine.disconnected_nodes_bitv);
+while ~converged & (iter <= engine.max_iter)
+ % Everybody updates their state in parallel
+ for n=nodes(:)'
+ cs_msg = children(engine.msg_dag, n);
+ %msg{n}.lambda = compute_lambda(n, cs, msg);
+ msg{n}.lambda = prod_lambda_msgs(n, cs_msg, msg, engine.msg_type);
+ ps_orig = parents(bnet.dag, n);
+ msg{n}.pi = CPD_to_pi(bnet.CPD{bnet.equiv_class(n)}, engine.msg_type, n, ps_orig, msg, evidence);
+ end
+
+ changed = 0;
+ if ~isempty(fid)
+ fprintf(fid, 'ITERATION %d\n', iter);
+ end
+ for n=hidden(:)' % this will not contain any disconnected nodes
+ old_bel{n} = bel{n};
+ bel{n} = compute_bel(engine.msg_type, msg{n}.pi, msg{n}.lambda);
+ if ~isempty(fid)
+ fprintf(fid, 'node %d: %s\n', n, bel_to_str(bel{n}, engine.msg_type));
+ end
+ if engine.storebel
+ engine.bel{n,iter} = bel{n};
+ end
+ if (iter == 1) | ~approxeq_bel(bel{n}, old_bel{n}, engine.tol, engine.msg_type)
+ changed = 1;
+ end
+ end
+ %converged = ~changed;
+ converged = ~changed & (iter > 1); % Sonia Leach changed this
+
+ if ~converged
+ % Everybody sends to all their neighbors in parallel
+ for n=nodes(:)'
+ % lambda msgs to parents
+ ps_msg = parents(engine.msg_dag, n);
+ ps_orig = parents(bnet.dag, n);
+ for p=ps_msg(:)'
+ j = engine.child_index{p}(n); % n is p's j'th child
+ old_msg = msg{p}.lambda_from_child{j}(:);
+ new_msg = CPD_to_lambda_msg(bnet.CPD{bnet.equiv_class(n)}, engine.msg_type, n, ps_orig, ...
+ msg, p, evidence);
+ lam_msg = convex_combination_msg(old_msg, new_msg, engine.momentum, engine.msg_type);
+ msg{p}.lambda_from_child{j} = lam_msg;
+ end
+
+ % pi msgs to children
+ cs_msg = children(engine.msg_dag, n);
+ for c=cs_msg(:)'
+ j = engine.parent_index{c}(n); % n is c's j'th parent
+ old_msg = msg{c}.pi_from_parent{j}(:);
+ %new_msg = compute_pi_msg(n, cs, msg, c));
+ new_msg = compute_bel(engine.msg_type, msg{n}.pi, prod_lambda_msgs(n, cs_msg, msg, engine.msg_type, c));
+ pi_msg = convex_combination_msg(old_msg, new_msg, engine.momentum, engine.msg_type);
+ msg{c}.pi_from_parent{j} = pi_msg;
+ end
+ end
+ iter = iter + 1;
+ end
+end
+
+if fid > 0, fclose(fid); end
+%niter = iter - 1;
+niter = iter;
+
+%%%%%%%%%%
+
+function str = bel_to_str(bel, type)
+
+switch type
+ case 'd', str = sprintf('%9.4f ', bel(:)');
+ case 'g', str = sprintf('%9.4f ', bel.mu(:)');
+end
+
+
+%%%%%%%
+
+function a = approxeq_bel(bel1, bel2, tol, type)
+
+switch type
+ case 'd', a = approxeq(bel1, bel2, tol);
+ case 'g', a = approxeq(bel1.mu, bel2.mu, tol) & approxeq(bel1.Sigma, bel2.Sigma, tol);
+end
+
+
+%%%%%%%
+
+function msg = convex_combination_msg(old_msg, new_msg, old_weight, type)
+
+switch type
+ case 'd', msg = old_weight * old_msg + (1-old_weight)*new_msg;
+ case 'g', msg = new_msg;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/prod_lambda_msgs.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/prod_lambda_msgs.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function lam = prod_lambda_msgs(n, cs, msg, msg_type, except)
+
+if nargin < 5, except = -1; end
+
+lam = msg{n}.lambda_from_self;
+switch msg_type
+ case 'd',
+ for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ lam = lam .* msg{n}.lambda_from_child{i};
+ end
+ end
+ case 'g',
+ if isinf(lam.precision) % isfield(lam, 'observed_val')
+ return; % pass on the observed msg
+ end
+ for i=1:length(cs)
+ c = cs(i);
+ if c ~= except
+ m = msg{n}.lambda_from_child{i};
+ lam.precision = lam.precision + m.precision;
+ lam.info_state = lam.info_state + m.info_state;
+ end
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/tree_protocol.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@pearl_inf_engine/private/tree_protocol.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+function msg = tree_protocol(engine, evidence, msg)
+
+bnet = bnet_from_engine(engine);
+N = length(bnet.dag);
+
+% Send messages from leaves to root
+for i=1:N-1
+ n = engine.postorder(i);
+ above = parents(engine.adj_mat, n);
+ msg = send_msgs_to_some_neighbors(n, msg, above, bnet, engine.child_index, engine.parent_index, ...
+ engine.msg_type, evidence);
+end
+
+% Process root
+n = engine.root;
+cs = children(bnet.dag, n);
+%msg{n}.lambda = compute_lambda(n, cs, msg, engine.msg_type);
+msg{n}.lambda = prod_lambda_msgs(n, cs, msg, engine.msg_type);
+ps = parents(bnet.dag, n);
+msg{n}.pi = CPD_to_pi(bnet.CPD{bnet.equiv_class(n)}, engine.msg_type, n, ps, msg, evidence);
+
+% Send messages from root to leaves
+for i=1:N
+ n = engine.preorder(i);
+ below = children(engine.adj_mat, n);
+ msg = send_msgs_to_some_neighbors(n, msg, below, bnet, engine.child_index, engine.parent_index, ...
+ engine.msg_type, evidence);
+end
+
+
+%%%%%%%%%%
+
+function msg = send_msgs_to_some_neighbors(n, msg, valid_nbrs, bnet, child_index, parent_index, ...
+ msg_type, evidence)
+
+verbose = 0;
+
+ns = bnet.node_sizes;
+dag = bnet.dag;
+e = bnet.equiv_class(n);
+CPD = bnet.CPD{e};
+
+
+cs = children(dag, n);
+%msg{n}.lambda = compute_lambda(n, cs, msg);
+msg{n}.lambda = prod_lambda_msgs(n, cs, msg, msg_type);
+if verbose, fprintf('%d computes lambda\n', n); display(msg{n}.lambda); end
+
+ps = parents(dag, n);
+msg{n}.pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence);
+if verbose, fprintf('%d computes pi\n', n); display(msg{n}.pi); end
+
+ps2 = myintersect(parents(dag, n), valid_nbrs);
+for p=ps2(:)'
+ lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence);
+ j = child_index{p}(n); % n is p's j'th child
+ msg{p}.lambda_from_child{j} = lam_msg;
+ if verbose, fprintf('%d sends lambda to %d\n', n, p); display(lam_msg); end
+end
+
+cs2 = myintersect(cs, valid_nbrs);
+for c=cs2(:)'
+ %pi_msg = compute_pi_msg(n, cs, msg, c);
+ pi_msg = compute_bel(msg_type, msg{n}.pi, prod_lambda_msgs(n, cs, msg, msg_type, c));
+ j = parent_index{c}(n); % n is c's j'th parent
+ msg{c}.pi_from_parent{j} = pi_msg;
+ if verbose, fprintf('%d sends pi to %d\n', n, c); display(pi_msg); end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/enter_evidence.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/marginal_nodes.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/quickscore_inf_engine.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D/private////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@quickscore_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function engine = enter_evidence(engine, pos, neg)
+% ENTER_EVIDENCE Add evidence to the QMR network
+% engine = enter_evidence(engine, pos, neg)
+%
+% pos = list of leaves that have positive observations
+% neg = list of leaves that have negative observations
+
+% Extract params for the observed findings
+obs = myunion(pos, neg);
+%inhibit_obs = engine.inhibit(obs, :);
+inhibit_obs = engine.inhibit(:,obs)';
+leak_obs = engine.leak(obs);
+
+% Find what nodes correspond to the original observed leaves
+pos2 = find_equiv_posns(pos, obs);
+neg2 = find_equiv_posns(neg, obs);
+engine.post = quickscore(pos2, neg2, inhibit_obs, engine.prior, leak_obs);
+%engine.post = C_quickscore(pos2, neg2, inhibit_obs, engine.prior, leak_obs);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function m = marginal_nodes(engine, query)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (quickscore)
+% marginal = marginal_nodes(engine, query)
+%
+% 'query' must be a single disease (root) node.
+
+assert(length(query)==1);
+p = engine.post(query);
+m.T = [1-p p]';
+m.domain = query;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/C_quickscore.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/nr.h/1.1.1.1/Wed May 29 15:59:56 2002//
+/nrutil.c/1.1.1.1/Wed May 29 15:59:56 2002//
+/nrutil.h/1.1.1.1/Wed May 29 15:59:56 2002//
+/quickscore.m/1.1.1.1/Wed May 29 15:59:56 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@quickscore_inf_engine/private
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/C_quickscore.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/C_quickscore.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,164 @@
+/* To compile, type "mex C_quickscore.c" */
+
+#include
+#include "nrutil.h"
+#include "nrutil.c"
+#include
+#include "mex.h"
+
+#define MAX(X,Y) (X)>(Y)?(X):(Y)
+
+int two_to_the(int n)
+{
+ return 1 << n;
+}
+
+void int2bin(int num, int nbits, int bits[])
+{
+ int i, mask;
+ mask = 1 << (nbits-1); /* mask = 0010...0 , where the 1 is in col nbits (rightmost = col 1) */
+ for (i = 0; i < nbits; i++) {
+ bits[i] = ((num & mask) == 0) ? 0 : 1;
+ num <<= 1;
+ }
+}
+
+
+void quickscore(int ndiseases, int nfindings, const double *fpos, int npos, const double *fneg, int nneg,
+ const double *inhibit, const double *prior, const double *leak, double *prob)
+{
+ double *Pon, *Poff, **Uon, **Uoff, **post, *pterm, *ptermOff, *ptermOn, temp, p, myp;
+ int *bits, nsubsets, *fmask;
+ int f, d, i, j, si, size_subset, sign;
+
+ Pon = dvector(0, ndiseases);
+ Poff = dvector(0, ndiseases);
+ Pon[0] = 1;
+ Poff[0] = 0;
+ for (i=1; i <= ndiseases; i++) {
+ Pon[i] = prior[i-1];
+ Poff[i] = 1-Pon[i];
+ }
+
+ Uon = dmatrix(0, nfindings-1, 0, ndiseases);
+ Uoff = dmatrix(0, nfindings-1, 0, ndiseases);
+ d = 0;
+ for (f=0; f < nfindings; f++) {
+ Uon[f][d] = leak[f];
+ Uoff[f][d] = leak[f];
+ }
+ for (f=0; f < nfindings; f++) {
+ for (d=1; d <= ndiseases; d++) {
+ Uon[f][d] = inhibit[f + nfindings*(d-1)];
+ Uoff[f][d] = 1;
+ }
+ }
+
+ post = dmatrix(0, ndiseases, 0, 1);
+ for (d = 0; d <= ndiseases; d++) {
+ post[d][0] = 0;
+ post[d][1] = 0;
+ }
+
+ bits = ivector(0, npos-1);
+ fmask = ivector(0, nfindings-1);
+ pterm = dvector(0, ndiseases);
+ ptermOff = dvector(0, ndiseases);
+ ptermOn = dvector(0, ndiseases);
+
+ nsubsets = two_to_the(npos);
+
+ for (si = 0; si < nsubsets; si++) {
+ int2bin(si, npos, bits);
+ for (i=0; i < nfindings; i++) fmask[i] = 0;
+ for (i=0; i < nneg; i++) fmask[(int)fneg[i]-1] = 1;
+ size_subset = 0;
+ for (i=0; i < npos; i++) {
+ if (bits[i]) {
+ size_subset++;
+ fmask[(int)fpos[i]-1] = 1;
+ }
+ }
+ p = 1;
+ for (d=0; d <= ndiseases; d++) {
+ temp = 1;
+ for (j = 0; j < nfindings; j++) {
+ if (fmask[j]) temp *= Uoff[j][d];
+ }
+ ptermOff[d] = temp;
+
+ temp = 1;
+ for (j = 0; j < nfindings; j++) {
+ if (fmask[j]) temp *= Uon[j][d];
+ }
+ ptermOn[d] = temp;
+
+ pterm[d] = Poff[d]*ptermOff[d] + Pon[d]*ptermOn[d];
+ p *= pterm[d];
+ }
+ sign = (int) pow(-1, size_subset);
+ for (d=0; d <= ndiseases; d++) {
+ myp = p / pterm[d];
+ post[d][0] += sign*(myp * ptermOff[d]);
+ post[d][1] += sign*(myp * ptermOn[d]);
+ }
+ } /* next si */
+
+
+ for (d=0; d <= ndiseases; d++) {
+ post[d][0] *= Poff[d];
+ post[d][1] *= Pon[d];
+ }
+ for (d=0; d <= ndiseases; d++) {
+ temp = post[d][0] + post[d][1];
+ post[d][0] /= temp;
+ post[d][1] /= temp;
+ if (d>0) { prob[d-1] = post[d][1]; }
+ }
+
+
+ free_dvector(Pon, 0, ndiseases);
+ free_dvector(Poff, 0, ndiseases);
+ free_dmatrix(Uon, 0, nfindings-1, 0, ndiseases);
+ free_dmatrix(Uoff, 0, nfindings-1, 0, ndiseases);
+ free_dmatrix(post, 0, ndiseases, 0, 1);
+ free_ivector(bits, 0, npos-1);
+ free_ivector(fmask, 0, nfindings-1);
+ free_dvector(pterm, 0, ndiseases);
+ free_dvector(ptermOff, 0, ndiseases);
+ free_dvector(ptermOn, 0, ndiseases);
+}
+
+
+void mexFunction(
+ int nlhs, mxArray *plhs[],
+ int nrhs, const mxArray *prhs[]
+ )
+{
+ double *fpos, *fneg, *inhibit, *prior, *leak, *prob;
+ int npos, nneg, ndiseases, nfindings;
+ double *p;
+
+ /* read the input args */
+ fpos = mxGetPr(prhs[0]);
+ npos = MAX(mxGetM(prhs[0]), mxGetN(prhs[0]));
+
+ fneg = mxGetPr(prhs[1]);
+ nneg = MAX(mxGetM(prhs[1]), mxGetN(prhs[1]));
+
+ inhibit = mxGetPr(prhs[2]); /* inhibit(finding, disease) */
+ nfindings = mxGetM(prhs[2]);
+ ndiseases = mxGetN(prhs[2]);
+
+ prior = mxGetPr(prhs[3]);
+
+ leak = mxGetPr(prhs[4]);
+
+
+ /* set the output pointers */
+ plhs[0] = mxCreateDoubleMatrix(1, ndiseases, mxREAL);
+ prob = mxGetPr(plhs[0]);
+
+ quickscore(ndiseases, nfindings, fpos, npos, fneg, nneg, inhibit, prior, leak, prob);
+}
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/nr.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/nr.h Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,536 @@
+/* CAUTION: This is the ANSI C (only) version of the Numerical Recipes
+ utility file nr.h. Do not confuse this file with the same-named
+ file nr.h that is supplied in the 'misc' subdirectory.
+ *That* file is the one from the book, and contains both ANSI and
+ traditional K&R versions, along with #ifdef macros to select the
+ correct version. *This* file contains only ANSI C. */
+
+#ifndef _NR_H_
+#define _NR_H_
+
+#ifndef _FCOMPLEX_DECLARE_T_
+typedef struct FCOMPLEX {float r,i;} fcomplex;
+#define _FCOMPLEX_DECLARE_T_
+#endif /* _FCOMPLEX_DECLARE_T_ */
+
+#ifndef _ARITHCODE_DECLARE_T_
+typedef struct {
+ unsigned long *ilob,*iupb,*ncumfq,jdif,nc,minint,nch,ncum,nrad;
+} arithcode;
+#define _ARITHCODE_DECLARE_T_
+#endif /* _ARITHCODE_DECLARE_T_ */
+
+#ifndef _HUFFCODE_DECLARE_T_
+typedef struct {
+ unsigned long *icod,*ncod,*left,*right,nch,nodemax;
+} huffcode;
+#define _HUFFCODE_DECLARE_T_
+#endif /* _HUFFCODE_DECLARE_T_ */
+
+#include
+
+void addint(double **uf, double **uc, double **res, int nf);
+void airy(float x, float *ai, float *bi, float *aip, float *bip);
+void amebsa(float **p, float y[], int ndim, float pb[], float *yb,
+ float ftol, float (*funk)(float []), int *iter, float temptr);
+void amoeba(float **p, float y[], int ndim, float ftol,
+ float (*funk)(float []), int *iter);
+float amotry(float **p, float y[], float psum[], int ndim,
+ float (*funk)(float []), int ihi, float fac);
+float amotsa(float **p, float y[], float psum[], int ndim, float pb[],
+ float *yb, float (*funk)(float []), int ihi, float *yhi, float fac);
+void anneal(float x[], float y[], int iorder[], int ncity);
+double anorm2(double **a, int n);
+void arcmak(unsigned long nfreq[], unsigned long nchh, unsigned long nradd,
+ arithcode *acode);
+void arcode(unsigned long *ich, unsigned char **codep, unsigned long *lcode,
+ unsigned long *lcd, int isign, arithcode *acode);
+void arcsum(unsigned long iin[], unsigned long iout[], unsigned long ja,
+ int nwk, unsigned long nrad, unsigned long nc);
+void asolve(unsigned long n, double b[], double x[], int itrnsp);
+void atimes(unsigned long n, double x[], double r[], int itrnsp);
+void avevar(float data[], unsigned long n, float *ave, float *var);
+void balanc(float **a, int n);
+void banbks(float **a, unsigned long n, int m1, int m2, float **al,
+ unsigned long indx[], float b[]);
+void bandec(float **a, unsigned long n, int m1, int m2, float **al,
+ unsigned long indx[], float *d);
+void banmul(float **a, unsigned long n, int m1, int m2, float x[], float b[]);
+void bcucof(float y[], float y1[], float y2[], float y12[], float d1,
+ float d2, float **c);
+void bcuint(float y[], float y1[], float y2[], float y12[],
+ float x1l, float x1u, float x2l, float x2u, float x1,
+ float x2, float *ansy, float *ansy1, float *ansy2);
+void beschb(double x, double *gam1, double *gam2, double *gampl,
+ double *gammi);
+float bessi(int n, float x);
+float bessi0(float x);
+float bessi1(float x);
+void bessik(float x, float xnu, float *ri, float *rk, float *rip,
+ float *rkp);
+float bessj(int n, float x);
+float bessj0(float x);
+float bessj1(float x);
+void bessjy(float x, float xnu, float *rj, float *ry, float *rjp,
+ float *ryp);
+float bessk(int n, float x);
+float bessk0(float x);
+float bessk1(float x);
+float bessy(int n, float x);
+float bessy0(float x);
+float bessy1(float x);
+float beta(float z, float w);
+float betacf(float a, float b, float x);
+float betai(float a, float b, float x);
+float bico(int n, int k);
+void bksub(int ne, int nb, int jf, int k1, int k2, float ***c);
+float bnldev(float pp, int n, long *idum);
+float brent(float ax, float bx, float cx,
+ float (*f)(float), float tol, float *xmin);
+float brent_arg(float ax, float bx, float cx,
+ float (*f)(float, void*), float tol, float *xmin, void *arg);
+void broydn(float x[], int n, int *check,
+ void (*vecfunc)(int, float [], float []));
+void bsstep(float y[], float dydx[], int nv, float *xx, float htry,
+ float eps, float yscal[], float *hdid, float *hnext,
+ void (*derivs)(float, float [], float []));
+void caldat(long julian, int *mm, int *id, int *iyyy);
+void chder(float a, float b, float c[], float cder[], int n);
+float chebev(float a, float b, float c[], int m, float x);
+void chebft(float a, float b, float c[], int n, float (*func)(float));
+void chebpc(float c[], float d[], int n);
+void chint(float a, float b, float c[], float cint[], int n);
+float chixy(float bang);
+void choldc(float **a, int n, float p[]);
+void cholsl(float **a, int n, float p[], float b[], float x[]);
+void chsone(float bins[], float ebins[], int nbins, int knstrn,
+ float *df, float *chsq, float *prob);
+void chstwo(float bins1[], float bins2[], int nbins, int knstrn,
+ float *df, float *chsq, float *prob);
+void cisi(float x, float *ci, float *si);
+void cntab1(int **nn, int ni, int nj, float *chisq,
+ float *df, float *prob, float *cramrv, float *ccc);
+void cntab2(int **nn, int ni, int nj, float *h, float *hx, float *hy,
+ float *hygx, float *hxgy, float *uygx, float *uxgy, float *uxy);
+void convlv(float data[], unsigned long n, float respns[], unsigned long m,
+ int isign, float ans[]);
+void copy(double **aout, double **ain, int n);
+void correl(float data1[], float data2[], unsigned long n, float ans[]);
+void cosft(float y[], int n, int isign);
+void cosft1(float y[], int n);
+void cosft2(float y[], int n, int isign);
+void covsrt(float **covar, int ma, int ia[], int mfit);
+void crank(unsigned long n, float w[], float *s);
+void cyclic(float a[], float b[], float c[], float alpha, float beta,
+ float r[], float x[], unsigned long n);
+void daub4(float a[], unsigned long n, int isign);
+float dawson(float x);
+float dbrent(float ax, float bx, float cx,
+ float (*f)(float), float (*df)(float), float tol, float *xmin);
+void ddpoly(float c[], int nc, float x, float pd[], int nd);
+int decchk(char string[], int n, char *ch);
+void derivs(float x, float y[], float dydx[]);
+float df1dim(float x);
+void dfour1(double data[], unsigned long nn, int isign);
+void dfpmin(float p[], int n, float gtol, int *iter, float *fret,
+ float (*func)(float []), void (*dfunc)(float [], float []));
+float dfridr(float (*func)(float), float x, float h, float *err);
+void dftcor(float w, float delta, float a, float b, float endpts[],
+ float *corre, float *corim, float *corfac);
+void dftint(float (*func)(float), float a, float b, float w,
+ float *cosint, float *sinint);
+void difeq(int k, int k1, int k2, int jsf, int is1, int isf,
+ int indexv[], int ne, float **s, float **y);
+void dlinmin(float p[], float xi[], int n, float *fret,
+ float (*func)(float []), void (*dfunc)(float [], float[]));
+double dpythag(double a, double b);
+void drealft(double data[], unsigned long n, int isign);
+void dsprsax(double sa[], unsigned long ija[], double x[], double b[],
+ unsigned long n);
+void dsprstx(double sa[], unsigned long ija[], double x[], double b[],
+ unsigned long n);
+void dsvbksb(double **u, double w[], double **v, int m, int n, double b[],
+ double x[]);
+void dsvdcmp(double **a, int m, int n, double w[], double **v);
+void eclass(int nf[], int n, int lista[], int listb[], int m);
+void eclazz(int nf[], int n, int (*equiv)(int, int));
+float ei(float x);
+void eigsrt(float d[], float **v, int n);
+float elle(float phi, float ak);
+float ellf(float phi, float ak);
+float ellpi(float phi, float en, float ak);
+void elmhes(float **a, int n);
+float erfcc(float x);
+float erff(float x);
+float erffc(float x);
+void eulsum(float *sum, float term, int jterm, float wksp[]);
+float evlmem(float fdt, float d[], int m, float xms);
+float expdev(long *idum);
+float expint(int n, float x);
+float f1(float x);
+float f1dim(float x);
+float f1dim_arg(float x, void *arg);
+float f2(float y);
+float f3(float z);
+float factln(int n);
+float factrl(int n);
+void fasper(float x[], float y[], unsigned long n, float ofac, float hifac,
+ float wk1[], float wk2[], unsigned long nwk, unsigned long *nout,
+ unsigned long *jmax, float *prob);
+void fdjac(int n, float x[], float fvec[], float **df,
+ void (*vecfunc)(int, float [], float []));
+void fgauss(float x, float a[], float *y, float dyda[], int na);
+void fill0(double **u, int n);
+void fit(float x[], float y[], int ndata, float sig[], int mwt,
+ float *a, float *b, float *siga, float *sigb, float *chi2, float *q);
+void fitexy(float x[], float y[], int ndat, float sigx[], float sigy[],
+ float *a, float *b, float *siga, float *sigb, float *chi2, float *q);
+void fixrts(float d[], int m);
+void fleg(float x, float pl[], int nl);
+void flmoon(int n, int nph, long *jd, float *frac);
+float fmin(float x[]);
+void four1(float data[], unsigned long nn, int isign);
+void fourew(FILE *file[5], int *na, int *nb, int *nc, int *nd);
+void fourfs(FILE *file[5], unsigned long nn[], int ndim, int isign);
+void fourn(float data[], unsigned long nn[], int ndim, int isign);
+void fpoly(float x, float p[], int np);
+void fred2(int n, float a, float b, float t[], float f[], float w[],
+ float (*g)(float), float (*ak)(float, float));
+float fredin(float x, int n, float a, float b, float t[], float f[], float w[],
+ float (*g)(float), float (*ak)(float, float));
+void frenel(float x, float *s, float *c);
+void frprmn(float p[], int n, float ftol, int *iter, float *fret,
+ float (*func)(float []), void (*dfunc)(float [], float []));
+void frprmn_arg(float p[], int n, float ftol, int *iter, float *fret,
+ float (*func)(float [], void*), void (*dfunc)(float [], float [], void*), void* arg);
+void ftest(float data1[], unsigned long n1, float data2[], unsigned long n2,
+ float *f, float *prob);
+float gamdev(int ia, long *idum);
+float gammln(float xx);
+float gammp(float a, float x);
+float gammq(float a, float x);
+float gasdev(long *idum);
+void gaucof(int n, float a[], float b[], float amu0, float x[], float w[]);
+void gauher(float x[], float w[], int n);
+void gaujac(float x[], float w[], int n, float alf, float bet);
+void gaulag(float x[], float w[], int n, float alf);
+void gauleg(float x1, float x2, float x[], float w[], int n);
+void gaussj(float **a, int n, float **b, int m);
+void gcf(float *gammcf, float a, float x, float *gln);
+float golden(float ax, float bx, float cx, float (*f)(float), float tol,
+ float *xmin);
+void gser(float *gamser, float a, float x, float *gln);
+void hpsel(unsigned long m, unsigned long n, float arr[], float heap[]);
+void hpsort(unsigned long n, float ra[]);
+void hqr(float **a, int n, float wr[], float wi[]);
+void hufapp(unsigned long index[], unsigned long nprob[], unsigned long n,
+ unsigned long i);
+void hufdec(unsigned long *ich, unsigned char *code, unsigned long lcode,
+ unsigned long *nb, huffcode *hcode);
+void hufenc(unsigned long ich, unsigned char **codep, unsigned long *lcode,
+ unsigned long *nb, huffcode *hcode);
+void hufmak(unsigned long nfreq[], unsigned long nchin, unsigned long *ilong,
+ unsigned long *nlong, huffcode *hcode);
+void hunt(float xx[], unsigned long n, float x, unsigned long *jlo);
+void hypdrv(float s, float yy[], float dyyds[]);
+fcomplex hypgeo(fcomplex a, fcomplex b, fcomplex c, fcomplex z);
+void hypser(fcomplex a, fcomplex b, fcomplex c, fcomplex z,
+ fcomplex *series, fcomplex *deriv);
+unsigned short icrc(unsigned short crc, unsigned char *bufptr,
+ unsigned long len, short jinit, int jrev);
+unsigned short icrc1(unsigned short crc, unsigned char onech);
+unsigned long igray(unsigned long n, int is);
+void iindexx(unsigned long n, long arr[], unsigned long indx[]);
+void indexx(unsigned long n, float arr[], unsigned long indx[]);
+void interp(double **uf, double **uc, int nf);
+int irbit1(unsigned long *iseed);
+int irbit2(unsigned long *iseed);
+void jacobi(float **a, int n, float d[], float **v, int *nrot);
+void jacobn(float x, float y[], float dfdx[], float **dfdy, int n);
+long julday(int mm, int id, int iyyy);
+void kendl1(float data1[], float data2[], unsigned long n, float *tau, float *z,
+ float *prob);
+void kendl2(float **tab, int i, int j, float *tau, float *z, float *prob);
+void kermom(double w[], double y, int m);
+void ks2d1s(float x1[], float y1[], unsigned long n1,
+ void (*quadvl)(float, float, float *, float *, float *, float *),
+ float *d1, float *prob);
+void ks2d2s(float x1[], float y1[], unsigned long n1, float x2[], float y2[],
+ unsigned long n2, float *d, float *prob);
+void ksone(float data[], unsigned long n, float (*func)(float), float *d,
+ float *prob);
+void kstwo(float data1[], unsigned long n1, float data2[], unsigned long n2,
+ float *d, float *prob);
+void laguer(fcomplex a[], int m, fcomplex *x, int *its);
+void lfit(float x[], float y[], float sig[], int ndat, float a[], int ia[],
+ int ma, float **covar, float *chisq, void (*funcs)(float, float [], int));
+void linbcg(unsigned long n, double b[], double x[], int itol, double tol,
+ int itmax, int *iter, double *err);
+void linmin(float p[], float xi[], int n, float *fret,
+ float (*func)(float []));
+void linmin_arg(float p[], float xi[], int n, float *fret,
+ float (*func)(float [], void*), void *arg);
+void lnsrch(int n, float xold[], float fold, float g[], float p[], float x[],
+ float *f, float stpmax, int *check, float (*func)(float []));
+void load(float x1, float v[], float y[]);
+void load1(float x1, float v1[], float y[]);
+void load2(float x2, float v2[], float y[]);
+void locate(float xx[], unsigned long n, float x, unsigned long *j);
+void lop(double **out, double **u, int n);
+void lubksb(float **a, int n, int *indx, float b[]);
+void ludcmp(float **a, int n, int *indx, float *d);
+void machar(int *ibeta, int *it, int *irnd, int *ngrd,
+ int *machep, int *negep, int *iexp, int *minexp, int *maxexp,
+ float *eps, float *epsneg, float *xmin, float *xmax);
+void matadd(double **a, double **b, double **c, int n);
+void matsub(double **a, double **b, double **c, int n);
+void medfit(float x[], float y[], int ndata, float *a, float *b, float *abdev);
+void memcof(float data[], int n, int m, float *xms, float d[]);
+int metrop(float de, float t);
+void mgfas(double **u, int n, int maxcyc);
+void mglin(double **u, int n, int ncycle);
+float midexp(float (*funk)(float), float aa, float bb, int n);
+float midinf(float (*funk)(float), float aa, float bb, int n);
+float midpnt(float (*func)(float), float a, float b, int n);
+float midsql(float (*funk)(float), float aa, float bb, int n);
+float midsqu(float (*funk)(float), float aa, float bb, int n);
+void miser(float (*func)(float []), float regn[], int ndim, unsigned long npts,
+ float dith, float *ave, float *var);
+void mmid(float y[], float dydx[], int nvar, float xs, float htot,
+ int nstep, float yout[], void (*derivs)(float, float[], float[]));
+void mnbrak(float *ax, float *bx, float *cx, float *fa, float *fb,
+ float *fc, float (*func)(float));
+void mnbrak_arg(float *ax, float *bx, float *cx, float *fa, float *fb,
+ float *fc, float (*func)(float, void*), void *arg);
+void mnewt(int ntrial, float x[], int n, float tolx, float tolf);
+void moment(float data[], int n, float *ave, float *adev, float *sdev,
+ float *var, float *skew, float *curt);
+void mp2dfr(unsigned char a[], unsigned char s[], int n, int *m);
+void mpadd(unsigned char w[], unsigned char u[], unsigned char v[], int n);
+void mpdiv(unsigned char q[], unsigned char r[], unsigned char u[],
+ unsigned char v[], int n, int m);
+void mpinv(unsigned char u[], unsigned char v[], int n, int m);
+void mplsh(unsigned char u[], int n);
+void mpmov(unsigned char u[], unsigned char v[], int n);
+void mpmul(unsigned char w[], unsigned char u[], unsigned char v[], int n,
+ int m);
+void mpneg(unsigned char u[], int n);
+void mppi(int n);
+void mprove(float **a, float **alud, int n, int indx[], float b[],
+ float x[]);
+void mpsad(unsigned char w[], unsigned char u[], int n, int iv);
+void mpsdv(unsigned char w[], unsigned char u[], int n, int iv, int *ir);
+void mpsmu(unsigned char w[], unsigned char u[], int n, int iv);
+void mpsqrt(unsigned char w[], unsigned char u[], unsigned char v[], int n,
+ int m);
+void mpsub(int *is, unsigned char w[], unsigned char u[], unsigned char v[],
+ int n);
+void mrqcof(float x[], float y[], float sig[], int ndata, float a[],
+ int ia[], int ma, float **alpha, float beta[], float *chisq,
+ void (*funcs)(float, float [], float *, float [], int));
+void mrqmin(float x[], float y[], float sig[], int ndata, float a[],
+ int ia[], int ma, float **covar, float **alpha, float *chisq,
+ void (*funcs)(float, float [], float *, float [], int), float *alamda);
+void newt(float x[], int n, int *check,
+ void (*vecfunc)(int, float [], float []));
+void odeint(float ystart[], int nvar, float x1, float x2,
+ float eps, float h1, float hmin, int *nok, int *nbad,
+ void (*derivs)(float, float [], float []),
+ void (*rkqs)(float [], float [], int, float *, float, float,
+ float [], float *, float *, void (*)(float, float [], float [])));
+void orthog(int n, float anu[], float alpha[], float beta[], float a[],
+ float b[]);
+void pade(double cof[], int n, float *resid);
+void pccheb(float d[], float c[], int n);
+void pcshft(float a, float b, float d[], int n);
+void pearsn(float x[], float y[], unsigned long n, float *r, float *prob,
+ float *z);
+void period(float x[], float y[], int n, float ofac, float hifac,
+ float px[], float py[], int np, int *nout, int *jmax, float *prob);
+void piksr2(int n, float arr[], float brr[]);
+void piksrt(int n, float arr[]);
+void pinvs(int ie1, int ie2, int je1, int jsf, int jc1, int k,
+ float ***c, float **s);
+float plgndr(int l, int m, float x);
+float poidev(float xm, long *idum);
+void polcoe(float x[], float y[], int n, float cof[]);
+void polcof(float xa[], float ya[], int n, float cof[]);
+void poldiv(float u[], int n, float v[], int nv, float q[], float r[]);
+void polin2(float x1a[], float x2a[], float **ya, int m, int n,
+ float x1, float x2, float *y, float *dy);
+void polint(float xa[], float ya[], int n, float x, float *y, float *dy);
+void powell(float p[], float **xi, int n, float ftol, int *iter, float *fret,
+ float (*func)(float []));
+void predic(float data[], int ndata, float d[], int m, float future[], int nfut);
+float probks(float alam);
+void psdes(unsigned long *lword, unsigned long *irword);
+void pwt(float a[], unsigned long n, int isign);
+void pwtset(int n);
+float pythag(float a, float b);
+void pzextr(int iest, float xest, float yest[], float yz[], float dy[],
+ int nv);
+float qgaus(float (*func)(float), float a, float b);
+void qrdcmp(float **a, int n, float *c, float *d, int *sing);
+float qromb(float (*func)(float), float a, float b);
+float qromo(float (*func)(float), float a, float b,
+ float (*choose)(float (*)(float), float, float, int));
+void qroot(float p[], int n, float *b, float *c, float eps);
+void qrsolv(float **a, int n, float c[], float d[], float b[]);
+void qrupdt(float **r, float **qt, int n, float u[], float v[]);
+float qsimp(float (*func)(float), float a, float b);
+float qtrap(float (*func)(float), float a, float b);
+float quad3d(float (*func)(float, float, float), float x1, float x2);
+void quadct(float x, float y, float xx[], float yy[], unsigned long nn,
+ float *fa, float *fb, float *fc, float *fd);
+void quadmx(float **a, int n);
+void quadvl(float x, float y, float *fa, float *fb, float *fc, float *fd);
+float ran0(long *idum);
+float ran1(long *idum);
+float ran2(long *idum);
+float ran3(long *idum);
+float ran4(long *idum);
+void rank(unsigned long n, unsigned long indx[], unsigned long irank[]);
+void ranpt(float pt[], float regn[], int n);
+void ratint(float xa[], float ya[], int n, float x, float *y, float *dy);
+void ratlsq(double (*fn)(double), double a, double b, int mm, int kk,
+ double cof[], double *dev);
+double ratval(double x, double cof[], int mm, int kk);
+float rc(float x, float y);
+float rd(float x, float y, float z);
+void realft(float data[], unsigned long n, int isign);
+void rebin(float rc, int nd, float r[], float xin[], float xi[]);
+void red(int iz1, int iz2, int jz1, int jz2, int jm1, int jm2, int jmf,
+ int ic1, int jc1, int jcf, int kc, float ***c, float **s);
+void relax(double **u, double **rhs, int n);
+void relax2(double **u, double **rhs, int n);
+void resid(double **res, double **u, double **rhs, int n);
+float revcst(float x[], float y[], int iorder[], int ncity, int n[]);
+void reverse(int iorder[], int ncity, int n[]);
+float rf(float x, float y, float z);
+float rj(float x, float y, float z, float p);
+void rk4(float y[], float dydx[], int n, float x, float h, float yout[],
+ void (*derivs)(float, float [], float []));
+void rkck(float y[], float dydx[], int n, float x, float h,
+ float yout[], float yerr[], void (*derivs)(float, float [], float []));
+void rkdumb(float vstart[], int nvar, float x1, float x2, int nstep,
+ void (*derivs)(float, float [], float []));
+void rkqs(float y[], float dydx[], int n, float *x,
+ float htry, float eps, float yscal[], float *hdid, float *hnext,
+ void (*derivs)(float, float [], float []));
+void rlft3(float ***data, float **speq, unsigned long nn1,
+ unsigned long nn2, unsigned long nn3, int isign);
+float rofunc(float b);
+void rotate(float **r, float **qt, int n, int i, float a, float b);
+void rsolv(float **a, int n, float d[], float b[]);
+void rstrct(double **uc, double **uf, int nc);
+float rtbis(float (*func)(float), float x1, float x2, float xacc);
+float rtflsp(float (*func)(float), float x1, float x2, float xacc);
+float rtnewt(void (*funcd)(float, float *, float *), float x1, float x2,
+ float xacc);
+float rtsafe(void (*funcd)(float, float *, float *), float x1, float x2,
+ float xacc);
+float rtsec(float (*func)(float), float x1, float x2, float xacc);
+void rzextr(int iest, float xest, float yest[], float yz[], float dy[], int nv);
+void savgol(float c[], int np, int nl, int nr, int ld, int m);
+void score(float xf, float y[], float f[]);
+void scrsho(float (*fx)(float));
+float select(unsigned long k, unsigned long n, float arr[]);
+float selip(unsigned long k, unsigned long n, float arr[]);
+void shell(unsigned long n, float a[]);
+void shoot(int n, float v[], float f[]);
+void shootf(int n, float v[], float f[]);
+void simp1(float **a, int mm, int ll[], int nll, int iabf, int *kp,
+ float *bmax);
+void simp2(float **a, int n, int l2[], int nl2, int *ip, int kp, float *q1);
+void simp3(float **a, int i1, int k1, int ip, int kp);
+void simplx(float **a, int m, int n, int m1, int m2, int m3, int *icase,
+ int izrov[], int iposv[]);
+void simpr(float y[], float dydx[], float dfdx[], float **dfdy,
+ int n, float xs, float htot, int nstep, float yout[],
+ void (*derivs)(float, float [], float []));
+void sinft(float y[], int n);
+void slvsm2(double **u, double **rhs);
+void slvsml(double **u, double **rhs);
+void sncndn(float uu, float emmc, float *sn, float *cn, float *dn);
+double snrm(unsigned long n, double sx[], int itol);
+void sobseq(int *n, float x[]);
+void solvde(int itmax, float conv, float slowc, float scalv[],
+ int indexv[], int ne, int nb, int m, float **y, float ***c, float **s);
+void sor(double **a, double **b, double **c, double **d, double **e,
+ double **f, double **u, int jmax, double rjac);
+void sort(unsigned long n, float arr[]);
+void sort2(unsigned long n, float arr[], float brr[]);
+void sort3(unsigned long n, float ra[], float rb[], float rc[]);
+void spctrm(FILE *fp, float p[], int m, int k, int ovrlap);
+void spear(float data1[], float data2[], unsigned long n, float *d, float *zd,
+ float *probd, float *rs, float *probrs);
+void sphbes(int n, float x, float *sj, float *sy, float *sjp, float *syp);
+void splie2(float x1a[], float x2a[], float **ya, int m, int n, float **y2a);
+void splin2(float x1a[], float x2a[], float **ya, float **y2a, int m, int n,
+ float x1, float x2, float *y);
+void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]);
+void splint(float xa[], float ya[], float y2a[], int n, float x, float *y);
+void spread(float y, float yy[], unsigned long n, float x, int m);
+void sprsax(float sa[], unsigned long ija[], float x[], float b[],
+ unsigned long n);
+void sprsin(float **a, int n, float thresh, unsigned long nmax, float sa[],
+ unsigned long ija[]);
+void sprspm(float sa[], unsigned long ija[], float sb[], unsigned long ijb[],
+ float sc[], unsigned long ijc[]);
+void sprstm(float sa[], unsigned long ija[], float sb[], unsigned long ijb[],
+ float thresh, unsigned long nmax, float sc[], unsigned long ijc[]);
+void sprstp(float sa[], unsigned long ija[], float sb[], unsigned long ijb[]);
+void sprstx(float sa[], unsigned long ija[], float x[], float b[],
+ unsigned long n);
+void stifbs(float y[], float dydx[], int nv, float *xx,
+ float htry, float eps, float yscal[], float *hdid, float *hnext,
+ void (*derivs)(float, float [], float []));
+void stiff(float y[], float dydx[], int n, float *x,
+ float htry, float eps, float yscal[], float *hdid, float *hnext,
+ void (*derivs)(float, float [], float []));
+void stoerm(float y[], float d2y[], int nv, float xs,
+ float htot, int nstep, float yout[],
+ void (*derivs)(float, float [], float []));
+void svbksb(float **u, float w[], float **v, int m, int n, float b[],
+ float x[]);
+void svdcmp(float **a, int m, int n, float w[], float **v);
+void svdfit(float x[], float y[], float sig[], int ndata, float a[],
+ int ma, float **u, float **v, float w[], float *chisq,
+ void (*funcs)(float, float [], int));
+void svdvar(float **v, int ma, float w[], float **cvm);
+void toeplz(float r[], float x[], float y[], int n);
+void tptest(float data1[], float data2[], unsigned long n, float *t, float *prob);
+void tqli(float d[], float e[], int n, float **z);
+float trapzd(float (*func)(float), float a, float b, int n);
+void tred2(float **a, int n, float d[], float e[]);
+void tridag(float a[], float b[], float c[], float r[], float u[],
+ unsigned long n);
+float trncst(float x[], float y[], int iorder[], int ncity, int n[]);
+void trnspt(int iorder[], int ncity, int n[]);
+void ttest(float data1[], unsigned long n1, float data2[], unsigned long n2,
+ float *t, float *prob);
+void tutest(float data1[], unsigned long n1, float data2[], unsigned long n2,
+ float *t, float *prob);
+void twofft(float data1[], float data2[], float fft1[], float fft2[],
+ unsigned long n);
+void vander(double x[], double w[], double q[], int n);
+void vegas(float regn[], int ndim, float (*fxn)(float [], float), int init,
+ unsigned long ncall, int itmx, int nprn, float *tgral, float *sd,
+ float *chi2a);
+void voltra(int n, int m, float t0, float h, float *t, float **f,
+ float (*g)(int, float), float (*ak)(int, int, float, float));
+void wt1(float a[], unsigned long n, int isign,
+ void (*wtstep)(float [], unsigned long, int));
+void wtn(float a[], unsigned long nn[], int ndim, int isign,
+ void (*wtstep)(float [], unsigned long, int));
+void wwghts(float wghts[], int n, float h,
+ void (*kermom)(double [], double ,int));
+int zbrac(float (*func)(float), float *x1, float *x2);
+void zbrak(float (*fx)(float), float x1, float x2, int n, float xb1[],
+ float xb2[], int *nb);
+float zbrent(float (*func)(float), float x1, float x2, float tol);
+void zrhqr(float a[], int m, float rtr[], float rti[]);
+float zriddr(float (*func)(float), float x1, float x2, float xacc);
+void zroots(fcomplex a[], int m, fcomplex roots[], int polish);
+
+#endif /* _NR_H_ */
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/nrutil.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/nrutil.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,321 @@
+/* CAUTION: This is the ANSI C (only) version of the Numerical Recipes
+ utility file nrutil.c. Do not confuse this file with the same-named
+ file nrutil.c that is supplied in the 'misc' subdirectory.
+ *That* file is the one from the book, and contains both ANSI and
+ traditional K&R versions, along with #ifdef macros to select the
+ correct version. *This* file contains only ANSI C. */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <stdlib.h>
+#define NR_END 1
+#define FREE_ARG char*
+
+void nrerror(char error_text[])
+/* Numerical Recipes standard error handler */
+{
+ fprintf(stderr,"Numerical Recipes run-time error...\n");
+ fprintf(stderr,"%s\n",error_text);
+ fprintf(stderr,"...now exiting to system...\n");
+ exit(1);
+}
+
+float *vector(long nl, long nh)
+/* allocate a float vector with subscript range v[nl..nh] */
+{
+ float *v;
+
+ v=(float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float)));
+ if (!v) nrerror("allocation failure in vector()");
+ return v-nl+NR_END;
+}
+
+int *ivector(long nl, long nh)
+/* allocate an int vector with subscript range v[nl..nh] */
+{
+ int *v;
+
+ v=(int *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(int)));
+ if (!v) nrerror("allocation failure in ivector()");
+ return v-nl+NR_END;
+}
+
+unsigned char *cvector(long nl, long nh)
+/* allocate an unsigned char vector with subscript range v[nl..nh] */
+{
+ unsigned char *v;
+
+ v=(unsigned char *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(unsigned char)));
+ if (!v) nrerror("allocation failure in cvector()");
+ return v-nl+NR_END;
+}
+
+unsigned long *lvector(long nl, long nh)
+/* allocate an unsigned long vector with subscript range v[nl..nh] */
+{
+ unsigned long *v;
+
+ v=(unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(long)));
+ if (!v) nrerror("allocation failure in lvector()");
+ return v-nl+NR_END;
+}
+
+double *dvector(long nl, long nh)
+/* allocate a double vector with subscript range v[nl..nh] */
+{
+ double *v;
+
+ v=(double *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(double)));
+ if (!v) nrerror("allocation failure in dvector()");
+ return v-nl+NR_END;
+}
+
+float **matrix(long nrl, long nrh, long ncl, long nch)
+/* allocate a float matrix with subscript range m[nrl..nrh][ncl..nch] */
+{
+ long i, nrow=nrh-nrl+1,ncol=nch-ncl+1;
+ float **m;
+
+ /* allocate pointers to rows */
+ m=(float **) malloc((size_t)((nrow+NR_END)*sizeof(float*)));
+ if (!m) nrerror("allocation failure 1 in matrix()");
+ m += NR_END;
+ m -= nrl;
+
+ /* allocate rows and set pointers to them */
+ m[nrl]=(float *) malloc((size_t)((nrow*ncol+NR_END)*sizeof(float)));
+ if (!m[nrl]) nrerror("allocation failure 2 in matrix()");
+ m[nrl] += NR_END;
+ m[nrl] -= ncl;
+
+ for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol;
+
+ /* return pointer to array of pointers to rows */
+ return m;
+}
+
+double **dmatrix(long nrl, long nrh, long ncl, long nch)
+/* allocate a double matrix with subscript range m[nrl..nrh][ncl..nch] */
+{
+ long i, nrow=nrh-nrl+1,ncol=nch-ncl+1;
+ double **m;
+
+ /* allocate pointers to rows */
+ m=(double **) malloc((size_t)((nrow+NR_END)*sizeof(double*)));
+ if (!m) nrerror("allocation failure 1 in matrix()");
+ m += NR_END;
+ m -= nrl;
+
+ /* allocate rows and set pointers to them */
+ m[nrl]=(double *) malloc((size_t)((nrow*ncol+NR_END)*sizeof(double)));
+ if (!m[nrl]) nrerror("allocation failure 2 in matrix()");
+ m[nrl] += NR_END;
+ m[nrl] -= ncl;
+
+ for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol;
+
+ /* return pointer to array of pointers to rows */
+ return m;
+}
+
+int **imatrix(long nrl, long nrh, long ncl, long nch)
+/* allocate a int matrix with subscript range m[nrl..nrh][ncl..nch] */
+{
+ long i, nrow=nrh-nrl+1,ncol=nch-ncl+1;
+ int **m;
+
+ /* allocate pointers to rows */
+ m=(int **) malloc((size_t)((nrow+NR_END)*sizeof(int*)));
+ if (!m) nrerror("allocation failure 1 in matrix()");
+ m += NR_END;
+ m -= nrl;
+
+
+ /* allocate rows and set pointers to them */
+ m[nrl]=(int *) malloc((size_t)((nrow*ncol+NR_END)*sizeof(int)));
+ if (!m[nrl]) nrerror("allocation failure 2 in matrix()");
+ m[nrl] += NR_END;
+ m[nrl] -= ncl;
+
+ for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol;
+
+ /* return pointer to array of pointers to rows */
+ return m;
+}
+
+float **submatrix(float **a, long oldrl, long oldrh, long oldcl, long oldch,
+ long newrl, long newcl)
+/* point a submatrix [newrl..][newcl..] to a[oldrl..oldrh][oldcl..oldch] */
+{
+ long i,j,nrow=oldrh-oldrl+1,ncol=oldcl-newcl;
+ float **m;
+
+ /* allocate array of pointers to rows */
+ m=(float **) malloc((size_t) ((nrow+NR_END)*sizeof(float*)));
+ if (!m) nrerror("allocation failure in submatrix()");
+ m += NR_END;
+ m -= newrl;
+
+ /* set pointers to rows */
+ for(i=oldrl,j=newrl;i<=oldrh;i++,j++) m[j]=a[i]+ncol;
+
+ /* return pointer to array of pointers to rows */
+ return m;
+}
+
+float **convert_matrix(float *a, long nrl, long nrh, long ncl, long nch)
+/* allocate a float matrix m[nrl..nrh][ncl..nch] that points to the matrix
+declared in the standard C manner as a[nrow][ncol], where nrow=nrh-nrl+1
+and ncol=nch-ncl+1. The routine should be called with the address
+&a[0][0] as the first argument. */
+{
+ long i,j,nrow=nrh-nrl+1,ncol=nch-ncl+1;
+ float **m;
+
+ /* allocate pointers to rows */
+ m=(float **) malloc((size_t) ((nrow+NR_END)*sizeof(float*)));
+ if (!m) nrerror("allocation failure in convert_matrix()");
+ m += NR_END;
+ m -= nrl;
+
+ /* set pointers to rows */
+ m[nrl]=a-ncl;
+ for(i=1,j=nrl+1;i (dmaxarg2) ?\
+ (dmaxarg1) : (dmaxarg2))
+
+static double dminarg1,dminarg2;
+#define DMIN(a,b) (dminarg1=(a),dminarg2=(b),(dminarg1) < (dminarg2) ?\
+ (dminarg1) : (dminarg2))
+
+static float maxarg1,maxarg2;
+#define FMAX(a,b) (maxarg1=(a),maxarg2=(b),(maxarg1) > (maxarg2) ?\
+ (maxarg1) : (maxarg2))
+
+static float minarg1,minarg2;
+#define FMIN(a,b) (minarg1=(a),minarg2=(b),(minarg1) < (minarg2) ?\
+ (minarg1) : (minarg2))
+
+static long lmaxarg1,lmaxarg2;
+#define LMAX(a,b) (lmaxarg1=(a),lmaxarg2=(b),(lmaxarg1) > (lmaxarg2) ?\
+ (lmaxarg1) : (lmaxarg2))
+
+static long lminarg1,lminarg2;
+#define LMIN(a,b) (lminarg1=(a),lminarg2=(b),(lminarg1) < (lminarg2) ?\
+ (lminarg1) : (lminarg2))
+
+static int imaxarg1,imaxarg2;
+#define IMAX(a,b) (imaxarg1=(a),imaxarg2=(b),(imaxarg1) > (imaxarg2) ?\
+ (imaxarg1) : (imaxarg2))
+
+static int iminarg1,iminarg2;
+#define IMIN(a,b) (iminarg1=(a),iminarg2=(b),(iminarg1) < (iminarg2) ?\
+ (iminarg1) : (iminarg2))
+
+#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
+
+void nrerror(char error_text[]);
+float *vector(long nl, long nh);
+int *ivector(long nl, long nh);
+unsigned char *cvector(long nl, long nh);
+unsigned long *lvector(long nl, long nh);
+double *dvector(long nl, long nh);
+float **matrix(long nrl, long nrh, long ncl, long nch);
+double **dmatrix(long nrl, long nrh, long ncl, long nch);
+int **imatrix(long nrl, long nrh, long ncl, long nch);
+float **submatrix(float **a, long oldrl, long oldrh, long oldcl, long oldch,
+ long newrl, long newcl);
+float **convert_matrix(float *a, long nrl, long nrh, long ncl, long nch);
+double **convert_dmatrix(double *a, long nrl, long nrh, long ncl, long nch);
+float ***f3tensor(long nrl, long nrh, long ncl, long nch, long ndl, long ndh);
+void free_vector(float *v, long nl, long nh);
+void free_ivector(int *v, long nl, long nh);
+void free_cvector(unsigned char *v, long nl, long nh);
+void free_lvector(unsigned long *v, long nl, long nh);
+void free_dvector(double *v, long nl, long nh);
+void free_matrix(float **m, long nrl, long nrh, long ncl, long nch);
+void free_dmatrix(double **m, long nrl, long nrh, long ncl, long nch);
+void free_imatrix(int **m, long nrl, long nrh, long ncl, long nch);
+void free_submatrix(float **b, long nrl, long nrh, long ncl, long nch);
+void free_convert_matrix(float **b, long nrl, long nrh, long ncl, long nch);
+void free_convert_dmatrix(double **b, long nrl, long nrh, long ncl, long nch);
+void free_f3tensor(float ***t, long nrl, long nrh, long ncl, long nch,
+ long ndl, long ndh);
+
+#endif /* _NR_UTILS_H_ */
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/quickscore.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/private/quickscore.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+function prob = quickscore(fpos, fneg, inhibit, prior, leak)
+% QUICKSCORE Heckerman's algorithm for BN2O networks.
+% prob = quickscore(fpos, fneg, inhibit, prior, leak)
+%
+% Consider a BN2O (Binary Node 2-layer Noisy-or) network such as QMR with
+% dieases on the top and findings on the bottom. (We assume all findings are observed,
+% since hidden leaves can be marginalized away.)
+% This algorithm takes O(2^|fpos|) time to compute the marginal on all the diseases.
+%
+% Inputs:
+% fpos = the positive findings (a vector of numbers in {1, ..., Nfindings})
+% fneg = the negative findings (a vector of numbers in {1, ..., Nfindings})
+% inhibit(i,j) = inhibition prob. for finding i, disease j, or 1.0 if j is not a parent.
+% prior(j) = prior prob. disease j is ON. We assume prior(off) = 1-prior(on).
+% leak(i) = inhibition prob. for the leak node for finding i
+%
+% Output:
+% prob(d) = Pr(disease d = on | ev)
+%
+% For details, see
+% - Heckerman, "A tractable inference algorithm for diagnosing multiple diseases", UAI89.
+% - Rish and Dechter, "On the impact of causal independence", UCI tech report, 1998.
+%
+% Note that this algorithm is numerically unstable, since it adds a large number of positive and
+% negative terms and hopes that some of them exactly cancel.
+%
+% For matlab experts, use 'mex' to compile C_quickscore, which has identical behavior to this function.
+
+[nfindings ndiseases] = size(inhibit);
+
+% make the first disease be always on, for the leak term
+Pon = [1 prior(:)'];
+Poff = 1-Pon;
+Uon = [leak(:) inhibit]; % U(f,d) = Pr(f=0|d=1)
+Uoff = [leak(:) ones(nfindings, ndiseases)]; % Uoff(f,d) = Pr(f=0|d=0)
+ndiseases = ndiseases + 1;
+
+npos = length(fpos);
+post = zeros(ndiseases, 2);
+% post(d,1) = alpha Pr(d=off), post(d,2) = alpha Pr(d=m)
+
+FP = length(fpos);
+%allbits = logical(dec2bitv(0:(2^FP - 1), FP));
+allbits = logical(ind2subv(2*ones(1,FP), 1:(2^FP))-1);
+
+for si=1:2^FP
+ bits = allbits(si,:);
+ fprime = fpos(bits);
+ fmask = zeros(1, nfindings);
+ fmask(fneg)=1;
+ fmask(fprime)=1;
+ fmask = logical(fmask);
+ p = 1;
+ pterm = zeros(1, ndiseases);
+ ptermOff = zeros(1, ndiseases);
+ ptermOn = zeros(1, ndiseases);
+ for d=1:ndiseases
+ ptermOff(d) = prod(Uoff(fmask,d));
+ ptermOn(d) = prod(Uon(fmask,d));
+ pterm(d) = Poff(d)*ptermOff(d) + Pon(d)*ptermOn(d);
+ end
+ p = prod(pterm);
+ sign = (-1)^(length(fprime));
+ for d=1:ndiseases
+ myp = p / pterm(d);
+ post(d,1) = post(d,1) + sign*(myp * ptermOff(d));
+ post(d,2) = post(d,2) + sign*(myp * ptermOn(d));
+ end
+end
+
+post(:,1) = post(:,1) .* Poff(:);
+post(:,2) = post(:,2) .* Pon(:);
+post = mk_stochastic(post);
+prob = post(2:end,2)'; % skip the leak term
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/quickscore_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@quickscore_inf_engine/quickscore_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function engine = quickscore_inf_engine(inhibit, leak, prior)
+% QUICKSCORE_INF_ENGINE Exact inference for the QMR network
+% engine = quickscore_inf_engine(inhibit, leak, prior)
+%
+% We create an inference engine for QMR-like networks.
+% QMR is a bipartite graph, where the top layer contains hidden disease nodes,
+% and the bottom later contains observed finding nodes.
+% The diseases have Bernoulli CPDs, the findings noisy-or CPDs.
+% The original QMR (Quick Medical Reference) network has specific parameter values which we are not
+% allowed to release, for commercial reasons.
+%
+% inhibit(f,d) = inhibition probability on f->d arc for disease d, finding f
+% If inhibit(f,d) = 1, there is effectively no arc from d->f
+% leak(j) = inhibition prob. on leak node -> finding j arc
+% prior(i) = prob. disease i is on
+%
+% We use exact inference, which takes O(2^P) time, where P is the number of positive findings.
+% For details, see
+% - Heckerman, "A tractable inference algorithm for diagnosing multiple diseases", UAI 89.
+% - Rish and Dechter, "On the impact of causal independence", UCI tech report, 1998.
+% Note that this algorithm is numerically unstable, since it adds a large number of positive and
+% negative terms and hopes that some of them exactly cancel.
+%
+% For an interesting variational approximation, see
+% - Jaakkola and Jordan, "Variational probabilistic inference and the QMR-DT network", JAIR 10, 1999.
+%
+% See also
+% - "Loopy belief propagation for approximate inference: an empirical study",
+% K. Murphy, Y. Weiss and M. Jordan, UAI 99.
+
+engine.inhibit = inhibit;
+engine.leak = leak;
+engine.prior = prior;
+
+% store results here between enter_evidence and marginal_nodes
+engine.post = [];
+
+engine = class(engine, 'quickscore_inf_engine'); % not a child of the inf_engine class!
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+/README/1.1.1.1/Sun May 11 15:39:50 2003//
+/clq_containing_nodes.m/1.1.1.1/Wed May 29 11:59:46 2002//
+/enter_evidence.m/1.1.1.1/Wed Mar 12 10:38:00 2003//
+/marginal_difclq_nodes.m/1.1.1.1/Fri Feb 21 11:20:32 2003//
+/marginal_nodes.m/1.1.1.1/Fri Feb 21 11:13:10 2003//
+/marginal_singleclq_nodes.m/1.1.1.1/Wed Jan 29 11:23:58 2003//
+/problems.txt/1.1.1.1/Wed May 29 11:59:46 2002//
+/push.m/1.1.1.1/Mon Feb 10 15:38:04 2003//
+/push_pot_toclique.m/1.1.1.1/Wed May 29 11:59:46 2002//
+/stab_cond_gauss_inf_engine.m/1.1.1.1/Fri Mar 28 17:12:42 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@stab_cond_gauss_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/initialize_engine.m/1.1.1.1/Wed May 29 11:59:46 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@stab_cond_gauss_inf_engine/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/initialize_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/Old/initialize_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function [engine, loglik] = initialize_engine(engine)
+%initialize
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+N = length(bnet.dag);
+
+pot_type = 'scg'
+check_for_cd_arcs([], bnet.cnodes, bnet.dag);
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N);
+C = length(engine.cliques);
+inited = zeros(1, C);
+clpot = cell(1, C);
+evidence = cell(1, N);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ pot{n} = CPD_to_scgpot(bnet.CPD{e}, fam, ns, bnet.cnodes, evidence);
+ cindex = engine.clq_ass_to_node(n);
+ if inited(cindex)
+ %clpot{cindex} = direct_combine_pots(clpot{cindex}, pot{n});
+ clpot{cindex} = direct_combine_pots(pot{n}, clpot{cindex});
+ else
+ clpot{cindex} = pot{n};
+ inited(cindex) = 1;
+ end
+end
+
+for i=1:C
+ if inited(i) == 0
+ clpot{i} = scgpot([], [], [], []);
+ end
+end
+
+seppot = cell(C, C);
+% separators do not need to be initialized
+
+% collect to root (node to parents)
+for n=engine.postorder(1:end-1)
+ for p=parents(engine.jtree, n)
+ [margpot, comppot] = complement_pot(clpot{n}, engine.separator{p,n});
+ margpot = marginalize_pot(clpot{n}, engine.separator{p,n});
+ clpot{n} = comppot;
+ %seppot{p, n} = margpot;
+ clpot{p} = combine_pots(clpot{p}, margpot);
+ %clpot{p} = combine_pots(margpot, clpot{p});
+ end
+end
+
+temppot = clpot;
+%temppot = clpot{engine.root};
+for n=engine.preorder
+ for c=children(engine.jtree, n)
+ seppot{n,c} = marginalize_pot(temppot{n}, engine.separator{n,c});
+ %seppot{n,c} = marginalize_pot(clpot{n}, engine.separator{n,c});
+ %clpot{c} = direct_combine_pots(clpot{c}, seppot{n,c});
+ temppot{c} = direct_combine_pots(temppot{c}, seppot{n,c});
+ end
+end
+
+engine.clpot = clpot;
+engine.seppot = seppot;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+% Stable conditional Gaussian inference
+% Originally written by Huang, Shan 2001
+% Fixed by Rainer Deventer 2003
+
+
+@techreport{Lauritzen99,
+ author = "S. Lauritzen and F. Jensen",
+ title = "Stable Local Computation with Conditional {G}aussian Distributions",
+ year = 1999,
+ number = "R-99-2014",
+ institution = "Dept. Math. Sciences, Aalborg Univ."
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/clq_containing_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/clq_containing_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function c = clq_containing_nodes(engine, nodes, fam)
+% CLQ_CONTAINING_NODES Find the lightest clique (if any) that contains the set of nodes
+% c = clq_containing_nodes(engine, nodes, family)
+%
+% If the optional 'family' argument is specified, it means nodes = family(nodes(end)).
+% (This is useful since clq_ass_to_node is not accessible to outsiders.)
+% Returns c=-1 if there is no such clique.
+
+if nargin < 3, fam = 0; else fam = 1; end
+
+if length(nodes)==1
+ c = engine.clq_ass_to_node(nodes(1));
+elseif fam
+ c = engine.clq_ass_to_node(nodes(end));
+else
+ B = engine.cliques_bitv;
+ w = engine.clique_weight;
+ clqs = find(all(B(:,nodes), 2)); % all selected columns must be 1
+ if isempty(clqs)
+ c = -1;
+ else
+ c = clqs(argmin(w(clqs)));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,260 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE enter evidence to engine including discrete and continuous evidence
+% [engine, ll] = enter_evidence(engine, evidence)
+%
+% ll is always 0, which is wrong.
+
+if ~isempty(engine.evidence)
+ bnet = bnet_from_engine(engine);
+ engine = stab_cond_gauss_inf_engine(bnet);
+ engine.evidence = evidence;
+else
+ engine.evidence = evidence;
+ bnet = bnet_from_engine(engine);
+end
+
+engine.evidence = evidence;
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes(:);
+observed = ~isemptycell(evidence);
+onodes = find(observed);
+hnodes = find(isemptycell(evidence));
+cobs = myintersect(bnet.cnodes, onodes);
+dobs = myintersect(bnet.dnodes, onodes);
+
+engine = incorporate_dis_evidence(engine, dobs, evidence);
+l = length(cobs);
+for i = 1:l
+ node = cobs(i);
+ engine = incorporate_singleconts_evidence(engine, node, evidence);
+end
+clpot = engine.clpot;
+
+clq_num = length(engine.cliques);
+for n=engine.postorder(1:end-1)
+ for p=parents(engine.jtree, n)
+ [margpot, comppot] = complement_pot(clpot{n}, engine.separator{p,n});
+ clpot{n} = comppot;
+ clpot{p} = combine_pots(clpot{p}, margpot);
+ end
+end
+
+temppot = clpot;
+for n=engine.preorder
+ for c=children(engine.jtree, n)
+ seppot{n,c} = marginalize_pot(temppot{n}, engine.separator{n,c});
+ temppot{c} = direct_combine_pots(temppot{c}, seppot{n,c});
+ end
+end
+engine.clpot = clpot;
+engine.seppot = seppot;
+
+[pot,loglik]=normalize_pot(clpot{engine.root});
+
+%%%%%%%%%%%%%%%%%%
+function engine = incorporate_dis_evidence(engine, donodes, evidence)
+l = length(donodes);
+for i=donodes(:)'
+ node = i;
+ clqid = engine.clq_ass_to_node(node);
+ pot = struct(engine.clpot{clqid});
+ ns = zeros(1, max(pot.domain));
+ ns(pot.ddom) = pot.dsizes;
+ ns(pot.cheaddom) = pot.cheadsizes;
+ ns(pot.ctaildom) = pot.ctailsizes;
+ ddom = pot.ddom;
+
+ potcarray = cell(1, pot.dsize);
+ for j =1:pot.dsize
+ tpotc = struct(pot.scgpotc{j});
+ potcarray{j} = scgcpot(tpotc.cheadsize, tpotc.ctailsize, 0, tpotc.A, tpotc.B, tpotc.C);
+ end
+
+ if length(ns(ddom)) == 1
+ matrix = pot.scgpotc;
+ else
+ matrix = reshape(pot.scgpotc,ns(ddom));
+ potcarray = reshape(potcarray, ns(ddom));
+ end
+
+ map = find_equiv_posns(node, ddom);
+ vals = cat(1, evidence{node});
+ index = mk_multi_index(length(ddom), map, vals);
+ potcarray(index{:}) = matrix(index{:});
+ potcarray = potcarray(:);
+ %keyboard;
+ engine.clpot{clqid} = scgpot(pot.ddom, pot.cheaddom, pot.ctaildom, ns, potcarray);
+end
+
+%%%%%%%%%%%%%%%%%%
+function engine = incorporate_singleconts_evidence(engine, node, evidence)
+%incorporate_singleconts_evidence incorporate evidence of 1 continuous node
+B = engine.cliques_bitv;
+clqs_containnode = find(all(B(:,node), 2)); % all selected columns must be 1
+% Every continuous node necessarily appears as head in exactly one clique,
+% which is the clique where it appears closest to the strong root. In all other
+% clique potentials where it appears, it must be a tail node.
+clq_ev_as_head = [];
+for i = clqs_containnode(:)'
+ pot = struct(engine.clpot{i});
+ if myismember(node, pot.cheaddom)
+ clq_ev_as_head = [clq_ev_as_head i];
+ break;
+ end
+end
+
+% If we will incorporate the evidence node which is head of a potential we must rearrange
+% the junction tree by push operations until the tail of the included potential is empty
+if ~isempty(clq_ev_as_head)
+ assert(1 == length(clq_ev_as_head));
+ i = clq_ev_as_head;
+ pot = struct(engine.clpot{i});
+ while ~isempty(pot.ctaildom)
+ [engine, clqtoroot] = push(engine, i, node);
+ i = clqtoroot;
+ pot = struct(engine.clpot{i});
+ end
+ B = engine.cliques_bitv;
+ clqs_containnode = find(all(B(:,node), 2));
+end
+
+for i = clqs_containnode(:)'
+ pot = struct(engine.clpot{i});
+ if myismember(node, pot.cheaddom)
+ engine.clpot{i} = incoporate_evidence_headnode(engine.clpot{i}, node, evidence);
+ else
+ %assert(myismember(node, pot.ctaildom));
+ engine.clpot{i} = incoporate_evidence_tailnode(engine.clpot{i}, node, evidence);
+ end
+end
+
+%%%%%%%%%%%%%%%%%%
+function newscgpot = incoporate_evidence_tailnode(pot, node, evidence)
+%ENTER_EVIDENCE_TAILNODE enter the evidence of 1 tailnode of the scgpot
+newscgpot = pot;
+pot = struct(pot);
+%if isempty(pot.ctaildom)
+if ~myismember(node, pot.ctaildom)
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % In this case there is no real dependency of the head nodes %
+ % on the tail. The potential should be returned unchanged %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ return;
+end
+%newscgpot = scgpot([], [], [], []);
+assert(myismember(node, pot.ctaildom));
+ni = block(find_equiv_posns(node, pot.ctaildom), pot.ctailsizes);
+
+ctaildom = mysetdiff(pot.ctaildom, node);
+cheaddom = pot.cheaddom;
+ddom = pot.ddom;
+domain = mysetdiff(pot.domain, node);
+dsize = pot.dsize;
+ns = zeros(1, max(pot.domain));
+ns(pot.ddom) = pot.dsizes;
+ns(pot.cheaddom) = pot.cheadsizes;
+ns(pot.ctaildom) = pot.ctailsizes;
+cheadsizes = pot.cheadsizes;
+cheadsize = pot.cheadsize;
+ctailsizes = ns(ctaildom);
+ctailsize = sum(ns(ctaildom));
+
+potarray = cell(1, dsize);
+for i=1:dsize
+ potc = struct(pot.scgpotc{i});
+ B = potc.B;
+ A = potc.A + B(:, ni)*evidence{node};
+ B(:, ni) = [];
+ potarray{i} = scgcpot(cheadsize, ctailsize, potc.p, A, B, potc.C);
+end
+
+newscgpot = scgpot(ddom, cheaddom, ctaildom, ns, potarray);
+
+%%%%%%%%%%%%%%%%
+function newscgpot = incoporate_evidence_headnode(pot, node, evidence)
+%ENTER_EVIDENCE_HEADNODE
+pot = struct(pot);
+y2 = evidence{node};
+assert(myismember(node, pot.cheaddom));
+assert(isempty(pot.ctaildom));
+ddom = pot.ddom;
+cheaddom = mysetdiff(pot.cheaddom, node);
+ctaildom = pot.ctaildom;
+dsize = pot.dsize;
+domain = mysetdiff(pot.domain, node);
+
+ns = zeros(1, max(pot.domain));
+ns(pot.ddom) = pot.dsizes;
+ns(pot.cheaddom) = pot.cheadsizes;
+ns(pot.ctaildom) = pot.ctailsizes;
+ctailsizes = ns(ctaildom);
+ctailsize = sum(ctailsizes);
+cheadsizes = ns(cheaddom);
+cheadsize = sum(cheadsizes);
+onodesize = ns(node);
+
+p = zeros(1,dsize);
+A1 = zeros(cheadsize, dsize);
+A2 = zeros(onodesize, dsize);
+C11 = zeros(cheadsize, cheadsize, dsize);
+C12 = zeros(cheadsize, onodesize, dsize);
+C21 = zeros(onodesize, cheadsize, dsize);
+C22 = zeros(onodesize, onodesize, dsize);
+ZM = zeros(onodesize, onodesize);
+
+n1i = block(find_equiv_posns(cheaddom, pot.cheaddom), pot.cheadsizes);
+n2i = block(find_equiv_posns(node, pot.cheaddom), pot.cheadsizes);
+
+indic = 0;
+for i=1:dsize
+ potc = struct(pot.scgpotc{i});
+ p(i) = potc.p;
+ if ~isempty(n1i)
+ A1(:,i) = potc.A(n1i);
+ end
+ if ~isempty(n2i)
+ A2(:,i) = potc.A(n2i);
+ end
+ C11(:,:,i) = potc.C(n1i, n1i);
+ C12(:,:,i) = potc.C(n1i, n2i);
+ C21(:,:,i) = potc.C(n2i, n1i);
+ C22(:,:,i) = potc.C(n2i, n2i);
+ if isequal(0, C22(:,:,i)) & isequal(evidence{node}, A2(:, i))
+ indic = i;
+ end
+end
+
+np = zeros(1,dsize);
+nA = zeros(cheadsize, dsize);
+nC = zeros(cheadsize, cheadsize, dsize);
+
+if indic
+ np(:) = 0;
+ np(indic) = p(indic);
+ nA = A1;
+ nC = C11;
+else
+ for i=1:dsize
+ if isequal(0, C22(:,:,i))
+ p(i) = 0;
+ nA(:, i) = A1(:, i);
+ nC(:,:,i) = C11(:,:,i);
+ else
+ sq = (y2 - A2(:,i))' * inv(C22(:,:,i)) * (y2 - A2(:,i));
+ ex = exp(-0.5*sq);
+ %np(i) = p(i) * ex / ( (2 * pi)^(-onodesize/2) * sqrt(det(C22(:,:,i))) );
+ np(i) = p(i) * ex / ( (2 * pi)^(onodesize/2) * sqrt(det(C22(:,:,i))) );
+ nA(:,i) = A1(:,i) + C12(:,:,i) * inv(C22(:,:,i)) * (y2 - A2(:,i));
+ tmp1 = C12(:,:,i) * inv(C22(:,:,i)) * C21(:,:,i);
+ nC(:,:,i) = C11(:,:,i) - tmp1;
+ end
+ end
+end
+
+scpot = cell(1, dsize);
+W = zeros(cheadsize,ctailsize);
+for i=1:dsize
+ scpot{i} = scgcpot(cheadsize, ctailsize, np(i), nA(:,i), W, nC(:,:,i));
+end
+ns(node) = 0;
+newscgpot = scgpot(ddom, cheaddom, ctaildom, ns, scpot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function marginal = marginal_difclq_nodes(engine, query_nodes)
+% MARGINAL_DIFCLQ_NODES get the marginal distribution of nodes which is not in a single clique
+% marginal = marginal_difclq_nodes(engine, query_nodes)
+
+keyboard
+num_clique = length(engine.cliques);
+B = engine.cliques_bitv;
+clqs_containnodes = [];
+for i=1:length(query_nodes)
+ node = query_nodes(i);
+ tnodes = find(all(B(:, node), 2));
+ clqs_containnodes = myunion(clqs_containnodes, tnodes);
+end
+% get all cliques contains query nodes
+
+% get the minimal sub tree in junction which contains these cliques and the node closest to the root of jtree
+[subtree, nroot_node] = min_subtree_conti_nodes(engine.jtree, engine.root, clqs_containnodes);
+if ~mysubset(query_nodes, engine.cliques{nroot_node});
+  % if the query nodes are not all members of the clique closest to the root clique, perform a push operation
+ engine = push_tree(engine, subtree, query_nodes, nroot_node);
+end
+
+if ~(nroot_node == engine.root)
+ % if the clique closest to the root clique is not the root clique we must direct combine the
+ % potential with the potential stored in separator toward to root
+ p = parents(engine.jtree, nroot_node);
+ tpot = direct_combine_pots(engine.clpot{nroot_node}, engine.seppot{p, nroot_node});
+else
+ tpot = engine.clpot{nroot_node};
+end
+
+pot = marginalize_pot(tpot, query_nodes);
+marginal = pot_to_marginal(pot);
+marginal.T = normalise(marginal.T);
+
+
+
+function engine = push_tree(engine, tree, query_nodes, inode)
+% PUSH_TREE recursive perform push opeartion on tree
+% engine = push_tree(engine, tree, query_nodes, inode)
+
+cs = children(tree, inode);
+for i = 1:length(cs)
+ node = cs(i);
+ push_tree(engine, tree, query_nodes, node);
+ push_dom = myintersect(engine.cliques{node}, query_nodes);
+ [engine, clqtoroot] = push(engine, node, push_dom);
+end
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,77 @@
+function marginal = marginal_nodes(engine, query, add_ev)
+% MARGINAL_NODES Compute the marginal on the specified query nodes (stab_cond_gauss)
+% marginal = marginal_nodes(engine, query, add_ev)
+%
+% 'query' must be a singleton set.
+% add_ev is an optional argument; if 1, we will "inflate" the marginal of observed nodes
+% to their original size, adding 0s to the positions which contradict the evidence
+
+if nargin < 3, add_ev = 0; end
+if isempty(engine.evidence)
+ hquery = query;
+else
+ hquery = [];
+ for i = query
+ if isempty(engine.evidence{i})
+ hquery = [hquery i];
+ end
+ end
+end
+
+bnet = bnet_from_engine(engine);
+
+nclq = length(engine.cliques);
+clique = 0;
+for i = 1:nclq
+ if mysubset(hquery, engine.cliques{i})
+ pot = struct(engine.clpot{i});
+ %if mysubset(hquery, pot.cheaddom) | mysubset(hquery, pot.ddom)
+ if mysubset(hquery, pot.domain)
+ clique = i;
+ break;
+ end
+ end
+end
+
+if isempty(hquery)
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % If all requested variables are observed, no query is necessary %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ marginal.mu = [];
+ marginal.Sigma = [];
+ marginal.T = 1.0;
+ marginal.domain = query;
+else
+ if clique == 0
+ marginal = marginal_difclq_nodes(engine, hquery);
+ else
+ marginal = marginal_singleclq_nodes(engine, clique, hquery);
+ end
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Change the format of output, so that it is identical to the %
+ % format obtained by the same request for the junction-tree %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ marginal.domain = query;
+ bnet = bnet_from_engine(engine);
+ dquery = myintersect(bnet.dnodes,hquery);
+ ns = bnet.node_sizes(dquery);
+ if length(ns) == 0
+ marginal.T = 1;
+ else
+ if length(ns) == 1
+ ns = [1 ns];
+ end
+ marginal.T = reshape(marginal.T,ns);
+ end
+end
+if add_ev
+ bnet = bnet_from_engine(engine);
+ %marginal = add_ev_to_dmarginal(marginal, engine.evidence, bnet.node_sizes);
+ marginal = add_evidence_to_gmarginal(marginal, engine.evidence, bnet.node_sizes, bnet.cnodes);
+end
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/marginal_singleclq_nodes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/marginal_singleclq_nodes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function marginal = marginal_singleclq_nodes(engine, i, query)
+% MARGINAL_SINGLECLQ_NODES get the marginal distribution of nodes which is in a single clique
+% marginal = marginal_singleclq_nodes(engine, i, query)
+
+pot = struct(engine.clpot{i});
+if isempty(pot.ctaildom)
+ if i ~= engine.root
+ p = parents(engine.jtree, i);
+ tpot = direct_combine_pots(engine.clpot{i}, engine.seppot{p, i});
+ else
+ tpot = engine.clpot{i};
+ end
+ pot = marginalize_pot(tpot, query);
+
+ marginal = pot_to_marginal(pot);
+ marginal.T = normalise(marginal.T);
+else
+ [engine, clqtoroot] = push(engine, i, query);
+ if clqtoroot == engine.root
+ tpot = engine.clpot{clqtoroot};
+ else
+ p = parents(engine.jtree, clqtoroot);
+ tpot = direct_combine_pots(engine.clpot{clqtoroot}, engine.seppot{p, clqtoroot});
+ end
+ pot = marginalize_pot(tpot, query);
+
+ marginal = pot_to_marginal(pot);
+ marginal.T = normalise(marginal.T);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/problems.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/problems.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+PROBLEMS WITH STAB_COND_GAUSS_INF_ENGINE
+
+
+- enter_evidence always returns ll=0
+ (I set ll=0 since it is not computed)
+
+- fails on scg_3node, probably because the engine needs to be
+re-initialized every time before enter_evidence is called, not just
+when the engine is constructed.
+
+??? Error using ==> assert
+assertion violated:
+
+K>> dbstack
+dbstack
+> In /home/eecs/murphyk/matlab/BNT/HMM/assert.m at line 9
+ In /home/eecs/murphyk/matlab/BNT/examples/static/SCG/scg_3node.m at line 45
+
+
+
+- crashes on scg3
+
+Error in ==> /home/eecs/murphyk/matlab/BNT/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m
+On line 77 ==> clpot{cindex} = direct_combine_pots(pot{n}, clpot{cindex});
+
+K>> dbstack
+dbstack
+> In /home/eecs/murphyk/matlab/BNT/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m at line 77
+ In /home/eecs/murphyk/matlab/BNT/examples/static/SCG/scg3.m at line 41
+K>>
+
+
+
+
+
+- fails on scg1 and scg2
+
+Warning: One or more output arguments not assigned during call to 'min_subtree_conti_nodes (nearsest_node2)'.
+Warning in ==> /home/eecs/murphyk/matlab/BNT/graph/min_subtree_conti_nodes.m (nearsest_node2)
+On line 60 ==> nea_node = nearsest_node2(tree, nodes, n);
+
+K>> dbstack
+dbstack
+> In /home/eecs/murphyk/matlab/BNT/graph/min_subtree_conti_nodes.m (nearsest_node2) at line 60
+ In /home/eecs/murphyk/matlab/BNT/graph/min_subtree_conti_nodes.m (nearest_node) at line 50
+ In /home/eecs/murphyk/matlab/BNT/graph/min_subtree_conti_nodes.m at line 11
+ In /home/eecs/murphyk/matlab/BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m at line 17
+ In /home/eecs/murphyk/matlab/BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m at line 23
+ In /home/eecs/murphyk/matlab/BNT/examples/static/SCG/scg1.m at line 42
+
+
+
+
+
+- This code fragment, from BNT/graph/min_subtree_conti_nodes, is clearly redundant
+
+function nea_node = nearest_node(tree, root, nodes)
+%get the nearest node to the root in the tree
+nea_node = nearsest_node2(tree, nodes, root);
+
+function nea_node = nearsest_node2(tree, nodes, inode)
+if myismember(inode, nodes)
+ nea_node = inode;
+ return;
+end
+cs = children(tree, inode);
+for i = 1:length(cs)
+ n = cs(i);
+ nea_node = nearsest_node2(tree, nodes, n);
+end
+
+
+- Some names are badly chosen. 'nearsest' is a mis-spelling. 'min_subtree_conti_nodes' should be
+'min_subtree_containing_nodes' or 'min_subtree_con_nodes'.
+
+- In general, the code needs some heavy polishing.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/push.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/push.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function [engine, clqtoroot] = push(engine, clq, pushdom)
+%PUSH_POT push the variables in pushdom, which is a subset of clq, to the clique toward the root and get the new engine
+%pushdom is pushed variables set
+%clq is the index of the clique that pushdom belongs to
+
+clqdom = engine.cliques{clq};
+assert( mysubset(pushdom, clqdom));
+clqtoroot = parents(engine.jtree, clq);
+%sepdom = engine.separator{clq, clqtoroot};
+sepdom = engine.separator{clqtoroot, clq};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Calculate the strong marginal of the union of pushdom and and the separatordomain and %
+% the corresponding complement %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%[margpot, comppot] = complement_pot(engine.clpot{clq}, pushdom);
+newsepdom = myunion(pushdom,sepdom);
+[margpot,comppot] = complement_pot(engine.clpot{clq}, newsepdom);
+engine.clpot{clqtoroot} = direct_combine_pots(engine.clpot{clqtoroot}, margpot);
+engine.clpot{clq} = comppot;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Calculation of the new separator and separatorpotential of the junction tree %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+engine.seppot{clqtoroot, clq} = direct_combine_pots(engine.seppot{clqtoroot, clq}, margpot);
+engine.separator{clqtoroot, clq} = myunion(engine.separator{clqtoroot, clq}, pushdom);
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Add pushdomain to the clique towards the root %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+engine.cliques{clqtoroot} = myunion(engine.cliques{clqtoroot}, pushdom);
+
+num_cliques = length(engine.cliques);
+B = sparse(num_cliques, 1);
+for i=1:num_cliques
+ B(i, engine.cliques{i}) = 1;
+end
+engine.cliques_bitv = B;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/push_pot_toclique.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/push_pot_toclique.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function engine = push_pot_toclique(engine, clqtarget, clq, nodes)
+% PUSH_POT push the variables in pushdom, which is a subset of clq, to the target clique toward the root and get the new engine
+% engine = push_pot_toclique(engine, clqtarget, clq, nodes)
+[engine, clqtoroot] = push_pot(engine, clq, nodes)
+while clqtoroot ~= clqtarget
+ [engine, clqtoroot] = push_pot(engine, clqtoroot, nodes)
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,178 @@
+function engine = stab_cond_gauss_inf_engine(bnet)
+% STAB_COND_GAUSS_INF_ENGINE Junction tree using stable CG potentials
+% engine = cond_gauss_inf_engine(bnet)
+%
+% This class was written by Shan Huang (shan.huang@intel.com) 2001
+% and fixed by Rainer Deventer deventer@informatik.uni-erlangen.de March 2003
+N = length(bnet.dag);
+clusters = {};
+root = N;
+stages = { 1:N };
+onodes = [];
+engine = init_fields;
+engine.evidence = [];
+engine = class(engine, 'stab_cond_gauss_inf_engine', inf_engine(bnet));
+
+ns = bnet.node_sizes(:);
+ns(onodes) = 1; % observed nodes have only 1 possible value
+
+%[engine.jtree, dummy, engine.cliques, B, w, elim_order, moral_edges, fill_in_edges, strong] = ...
+% dag_to_jtree(bnet, onodes, stages, clusters);
+
+
+partial_order = determine_elim_constraints(bnet, onodes);
+strong = ~isempty(partial_order);
+stages = {};
+clusters = {};
+[engine.jtree, dummy_root, engine.cliques, B, w, elim_order] = ...
+  graph_to_jtree(moralize(bnet.dag), ns, partial_order, stages, clusters);
+
+
+engine.cliques_bitv = B;
+engine.clique_weight = w;
+C = length(engine.cliques);
+engine.clpot = cell(1,C);
+
+% A node can be a member of many cliques, but is assigned to exactly one, to avoid
+% double-counting its CPD. We assign node i to clique c if c is the "lightest" clique that
+% contains i's family, so it can accomodate its CPD.
+
+engine.clq_ass_to_node = zeros(1, N);
+num_cliques = length(engine.cliques);
+for i=1:N
+ clqs_containing_family = find(all(B(:,family(bnet.dag, i)), 2)); % all selected columns must be 1
+ c = clqs_containing_family(argmin(w(clqs_containing_family)));
+ engine.clq_ass_to_node(i) = c;
+end
+
+% Compute the separators between connected cliques.
+[is,js] = find(engine.jtree > 0);
+engine.separator = cell(num_cliques, num_cliques);
+for k=1:length(is)
+ i = is(k); j = js(k);
+ engine.separator{i,j} = find(B(i,:) & B(j,:)); % intersect(cliques{i}, cliques{j});
+end
+%keyboard;
+engine.seppot = cell(C,C);
+
+pot_type = 'scg';
+check_for_cd_arcs([], bnet.cnodes, bnet.dag);
+
+% Make the jtree rooted, so there is a fixed message passing order.
+if strong
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Start the search for the strong root at the clique with the %
+ % highest number. %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ root = length(engine.cliques);
+ root_found = 0;
+
+ while ((~root_found) & (root >= 1))
+ root_found = test_strong_root(engine.jtree,engine.cliques,bnet.dnodes,root);
+ if ~root_found
+ root = root - 1;
+ end
+ end
+ assert(root > 0)
+ engine.root = root;
+ % the last clique is guaranteed to be a strong root
+ %engine.root = length(engine.cliques);
+else
+ % jtree_dbn_inf_engine requires the root to contain the interface.
+ % This may conflict with the strong root requirement! *********** BUG *************
+ engine.root = clq_containing_nodes(engine, root);
+ if engine.root <= 0
+ error(['no clique contains ' num2str(root)]);
+ end
+end
+
+[engine.jtree, engine.preorder, engine.postorder] = mk_rooted_tree(engine.jtree, engine.root);
+
+% Evaluate CPDs with evidence, and convert to potentials
+pot = cell(1, N);
+inited = zeros(1, C);
+clpot = cell(1, C);
+evidence = cell(1, N);
+for n=1:N
+ fam = family(bnet.dag, n);
+ e = bnet.equiv_class(n);
+ %pot{n} = CPD_to_scgpot(bnet.CPD{e}, fam, ns, bnet.cnodes, evidence);
+ pot{n} = convert_to_pot(bnet.CPD{e}, pot_type, fam(:), evidence);
+ cindex = engine.clq_ass_to_node(n);
+ if inited(cindex)
+ clpot{cindex} = direct_combine_pots(pot{n}, clpot{cindex});
+ else
+ clpot{cindex} = pot{n};
+ inited(cindex) = 1;
+ end
+end
+
+for i=1:C
+ if inited(i) == 0
+ clpot{i} = scgpot([], [], [], []);
+ end
+end
+
+seppot = cell(C, C);
+% separators do not need to be initialized
+
+% collect to root (node to parents)
+% Unlike the HUGIN architecture the complements are stored in the cliques during COLLECT
+% and the separators are not playing a specific role during this process
+for n=engine.postorder(1:end-1)
+ for p=parents(engine.jtree, n)
+ if ~isempty(engine.separator{p,n})
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % The empty case might happen for unlinked nodes, i.e. the DAG is not %
+ % a single tree, but a forest %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ [margpot, comppot] = complement_pot(clpot{n}, engine.separator{p,n});
+ clpot{n} = comppot;
+ clpot{p} = combine_pots(clpot{p}, margpot);
+ end
+ end
+end
+
+% distribute message from root
+% We have not to store the weak clique marginals and keep the original complement potentials.
+% This is a minor variation of HUGIN architecture.
+temppot = clpot;
+for n=engine.preorder
+ for c=children(engine.jtree, n)
+ seppot{n,c} = marginalize_pot(temppot{n}, engine.separator{n,c});
+ temppot{c} = direct_combine_pots(temppot{c}, seppot{n,c});
+ end
+end
+
+engine.clpot = clpot;
+engine.seppot = seppot;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% init_fields() %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function engine = init_fields()
+
+engine.evidence = [];
+engine.jtree = [];
+engine.cliques = [];
+engine.cliques_bitv = [];
+engine.clique_weight = [];
+engine.preorder = [];
+engine.postorder = [];
+engine.root = [];
+engine.clq_ass_to_node = [];
+engine.separator = [];
+engine.clpot =[];
+engine.seppot = [];
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+/enter_evidence.m/1.1.1.1/Wed Jun 19 22:05:04 2002//
+/find_mpe.m/1.1.1.1/Wed Jun 19 22:11:42 2002//
+/marginal_nodes.m/1.1.1.1/Thu Sep 30 03:09:00 2004//
+/var_elim_inf_engine.m/1.1.1.1/Wed Jun 19 22:04:50 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static/@var_elim_inf_engine
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/enter_evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/enter_evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function [engine, loglik] = enter_evidence(engine, evidence, varargin)
+% ENTER_EVIDENCE Add the specified evidence to the network (var_elim)
+% [engine, loglik] = enter_evidence(engine, evidence, ...)
+%
+% evidence{i} = [] if if X(i) is hidden, and otherwise contains its observed value (scalar or column vector)
+
+% we could pre-process the evidence here, to prevent repeated work, but we don't.
+engine.evidence = evidence;
+
+if nargout == 2
+ [m, loglik] = marginal_nodes(engine, [1]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/find_mpe.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/find_mpe.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,163 @@
+function mpe = find_mpe(engine, new_evidence, max_over)
+% FIND_MPE Find the most probable explanation of the data (assignment to the hidden nodes)
+% function mpe = find_mpe(engine, evidence, order)
+%
+% PURPOSE:
+% CALC_MPE Computes the most probable explanation to the network nodes
+% given the evidence.
+%
+% [mpe, ll] = calc_mpe(engine, new_evidence, max_over)
+%
+% INPUT:
+% bnet - the bayesian network
+% new_evidence - optional, if specified - evidence to be incorporated [cell(1,n)]
+% max_over - optional, if specified determines the variable elimination order [1:n]
+%
+% OUTPUT:
+% mpe - the MPE assignmet for the net variables (or [] if no satisfying assignment)
+% ll - log assignment probability.
+%
+% Notes:
+% 1. Adapted from '@var_elim_inf_engine\marginal_nodes' for MPE by Ron Zohar, 8/7/01
+% 2. Only discrete potentials are supported at this time.
+% 3. Complexity: O(nw*) where n is the number of nodes and w* is the induced tree width.
+% 4. Implementation based on:
+% - R. Dechter, "Bucket Elimination: A Unifying Framework for Probabilistic Inference",
+% UA1 96, pp. 211-219.
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+n = length(bnet.dag);
+evidence = cell(1,n);
+if (nargin<2)
+ new_evidence = evidence;
+end
+
+onodes = find(~isemptycell(new_evidence)); % observed nodes
+hnodes = find(isemptycell(new_evidence)); % hidden nodes
+pot_type = determine_pot_type(bnet, onodes);
+
+if pot_type ~= 'd'
+ error('only disrete potentials supported at this time')
+end
+
+for i=1:n
+ fam = family(bnet.dag, i);
+ CPT{i} = convert_to_pot(bnet.CPD{bnet.equiv_class(i)}, pot_type, fam(:), evidence);
+end
+
+% handle observed nodes: set impossible cases' probability to zero
+% rather than prun matrix (this makes backtracking easier)
+
+for ii=onodes
+ lIdx = 1:ns(ii);
+ lIdx = setdiff(lIdx, new_evidence{ii});
+
+ sCPT=struct(CPT{ii}); % violate object privacy
+
+ sargs = '';
+ for jj=1:(length(sCPT.domain)-1)
+ sargs = [sargs, ':,'];
+ end
+ for jj=lIdx
+ eval(['sCPT.T(', sargs, num2str(jj), ')=0;']);
+ end
+ CPT{ii}=dpot(sCPT.domain, sCPT.sizes, sCPT.T);
+end
+
+B = cell(1,n);
+for b=1:n
+ B{b} = mk_initial_pot(pot_type, [], [], [], []);
+end
+
+if (nargin<3)
+ max_over = (1:n);
+end
+order = max_over; % no attempt to optimize this
+
+
+% Initialize the buckets with the CPDs assigned to them
+for i=1:n
+ b = bucket_num(domain_pot(CPT{i}), order);
+ B{b} = multiply_pots(B{b}, CPT{i});
+end
+
+% Do backward phase
+max_over = max_over(length(max_over):-1:1); % reverse
+maximize = 1;
+for i=max_over(1:end-1)
+ % max-ing over variable i which occurs in bucket j
+ j = bucket_num(i, order);
+ rest = mysetdiff(domain_pot(B{j}), i);
+ %temp = marginalize_pot_max(B{j}, rest);
+ temp = marginalize_pot(B{j}, rest, maximize);
+ b = bucket_num(domain_pot(temp), order);
+ % fprintf('maxing over bucket %d (var %d), putting result into bucket %d\n', j, i, b);
+ sB=struct(B{b}); % violate object privacy
+ if ~isempty(sB.domain)
+ B{b} = multiply_pots(B{b}, temp);
+ else
+ B{b} = temp;
+ end
+end
+result = B{1};
+marginal = pot_to_marginal(result);
+[prob, mpe] = max(marginal.T);
+
+% handle impossible cases
+if ~(prob>0)
+ mpe = [];
+ ll = -inf;
+ %warning('evidence has zero probability')
+ return
+end
+
+ll = log(prob);
+
+% Do forward phase
+for ii=2:n
+ marginal = pot_to_marginal(B{ii});
+ mpeidx = [];
+ for jj=order(1:length(mpe))
+ %assert(ismember(jj, marginal.domain)) %%% bug
+ temp = find_equiv_posns(jj, marginal.domain);
+ mpeidx = [mpeidx, temp] ;
+ if isempty(temp)
+ mpeidx = [mpeidx, Inf] ;
+ end
+ end
+ [mpeidxsorted sortedtompe] = sort(mpeidx) ;
+
+ % maximize the matrix obtained from assigning values from previous buckets.
+ % this is done by building a string and using eval.
+
+ kk=1;
+ sargs = '(';
+ for jj=1:length(marginal.domain)
+ if (jj~=1)
+ sargs = [sargs, ','];
+ end
+ if (mpeidxsorted(kk)==jj)
+ sargs = [sargs, num2str(mpe(sortedtompe(kk)))];
+ if (kk=1);
+
+evidence = engine.evidence;
+
+bnet = bnet_from_engine(engine);
+ns = bnet.node_sizes;
+n = length(bnet.dag);
+
+onodes = find(~isemptycell(evidence));
+hnodes = find(isemptycell(evidence));
+pot_type = determine_pot_type(bnet, onodes);
+
+% Fold the evidence into the CPTs - this could be done in 'enter_evidence'
+CPT = cell(1,n);
+for i=1:n
+ fam = family(bnet.dag, i);
+ CPT{i} = convert_to_pot(bnet.CPD{bnet.equiv_class(i)}, pot_type, fam(:), evidence);
+end
+
+
+
+sum_over = mysetdiff(1:n, query);
+order = [query sum_over]; % no attempt to optimize this
+
+% Initialize the buckets with the product of the CPTs assigned to them
+B = cell(1,n+1);
+for b=1:n+1
+ B{b} = mk_initial_pot(pot_type, [], [], [], []);
+end
+for i=1:n
+ b = bucket_num(domain_pot(CPT{i}), order);
+ B{b} = multiply_pots(B{b}, CPT{i});
+end
+
+% Do the marginalization
+sum_over = sum_over(length(sum_over):-1:1); % reverse
+for i=sum_over(:)'
+ % summing over variable i which occurs in bucket j
+ j = bucket_num(i, order);
+ rest = mysetdiff(domain_pot(B{j}), i);
+ % minka
+ if ~isempty(rest)
+ temp = marginalize_pot(B{j}, rest);
+ b = bucket_num(domain_pot(temp), order);
+ %fprintf('summing over bucket %d (var %d), putting result into bucket %d\n', j, i, b);
+ B{b} = multiply_pots(B{b}, temp);
+ end
+end
+
+% Combine all the remaining buckets into one
+result = B{1};
+for i=2:length(query)
+ if ~isempty(domain_pot(B{i}))
+ result = multiply_pots(result, B{i});
+ end
+end
+[result, loglik] = normalize_pot(result);
+
+
+marginal = pot_to_marginal(result);
+% minka: from jtree_inf_engine
+if add_ev
+ bnet = bnet_from_engine(engine);
+ %marginal = add_ev_to_dmarginal(marginal, engine.evidence, bnet.node_sizes);
+ marginal = add_evidence_to_gmarginal(marginal, engine.evidence, bnet.node_sizes, bnet.cnodes);
+end
+
+%%%%%%%%%
+
+function b = bucket_num(domain, order)
+
+b = max(find_equiv_posns(domain, order));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/var_elim_inf_engine.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/@var_elim_inf_engine/var_elim_inf_engine.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function engine = var_elim_inf_engine(bnet, varargin)
+% VAR_ELIM_INF_ENGINE Variable elimination inference engine
+% engine = var_elim_inf_engine(bnet)
+%
+% For details on variable elimination, see
+% - R. Dechter, "Bucket Elimination: A Unifying Framework for Probabilistic Inference", UA1 96, pp. 211-219.
+% - Z. Li and B. D'Ambrosio, "Efficient inference in Bayes networks as a combinatorial
+% optimization problem", Intl. J. Approximate Reasoning, 11(1):55-81, 1994
+% - R. McEliece and S. M. Aji, "The Generalized Distributive Law", IEEE Trans. Inform. Theory, 46(2), 2000
+
+
+% This is where we will store the results between enter_evidence and marginal_nodes
+engine.evidence = [];
+
+engine = class(engine, 'var_elim_inf_engine', inf_engine(bnet));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/dummy/1.1.1.1/Sat Jan 18 22:22:46 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+A D/@belprop_fg_inf_engine////
+A D/@belprop_inf_engine////
+A D/@belprop_mrf2_inf_engine////
+A D/@cond_gauss_inf_engine////
+A D/@enumerative_inf_engine////
+A D/@gaussian_inf_engine////
+A D/@gibbs_sampling_inf_engine////
+A D/@global_joint_inf_engine////
+A D/@jtree_inf_engine////
+A D/@jtree_limid_inf_engine////
+A D/@jtree_mnet_inf_engine////
+A D/@jtree_sparse_inf_engine////
+A D/@likelihood_weighting_inf_engine////
+A D/@pearl_inf_engine////
+A D/@quickscore_inf_engine////
+A D/@stab_cond_gauss_inf_engine////
+A D/@var_elim_inf_engine////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/inference/static
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/inference/static/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/installC_BNT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/installC_BNT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+BNT_HOME = '/home/ai2/murphyk/matlab/FullBNT'; % edit this
+
+d = fullfile(BNT_HOME, 'BNT');
+%PC = (strncmp(computer,'PC',2));
+
+cd(sprintf('%s/potentials/Tables', d))
+mex marg_table.c % used by @dpot/marginalize_pot.m
+mex marg_sparse_table.c %used by sparse jtree
+mex mult_by_table.c
+mex mult_by_sparse_table.c
+mex divide_by_table.c
+mex divide_by_sparse_table.c
+mex rep_mult.c
+
+% Written by Wei Hu
+cd(sprintf('%s/CPDs/@discrete_CPD', d))
+mex convert_to_sparse_table.c
+
+% Written by Wei Hu
+cd(sprintf('%s/inference/static/@jtree_sparse_inf_engine', d))
+mex init_pot.c
+mex collect_evidence.c
+mex distribute_evidence.c
+
+% written by Bhaskara Marthi
+cd(sprintf('%s/inference/static/@gibbs_sampling_inf_engine/private', d))
+mex compute_posterior.c
+mex get_slice_dbn.c
+mex sample_single_discrete.c
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/installC_graph.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/installC_graph.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+% These C functions were written by Ilya Shpitser.
+
+if isunix
+ mex -c elim.c;
+ mex -c cell.c;
+ mex -c map.c;
+ mex -DUNIX best_first_elim_order.c elim.o cell.o map.o;
+ mex -DUNIX triangulate.c elim.o cell.o map.o;
+else
+ mex -c elim.c;
+ mex -c cell.c;
+ mex -c map.c;
+ mex best_first_elim_order.c elim.obj cell.obj map.obj;
+ mex triangulate.c elim.obj cell.obj map.obj;
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+/CovMat.m/1.1.1.1/Sun Jul 28 23:09:42 2002//
+/bayes_update_params.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/bic_score_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/compute_cooling_schedule.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/dirichlet_score_family.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/kpm_learn_struct_mcmc.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_params.m/1.1.1.1/Thu Jun 10 01:28:08 2004//
+/learn_params_dbn.m/1.1.1.1/Sun Feb 2 00:23:38 2003//
+/learn_params_dbn_em.m/1.1.1.1/Mon Aug 18 21:50:34 2003//
+/learn_params_em.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_struct_K2.m/1.1.1.1/Thu Sep 26 08:39:16 2002//
+/learn_struct_dbn_reveal.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_struct_mcmc.m/1.1.1.1/Fri Sep 5 14:06:40 2003//
+/learn_struct_pdag_ic_star.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/learn_struct_pdag_pc.m/1.2/Mon Feb 12 01:15:13 2007//
+/learn_struct_pdag_pc_constrain.m/1.1.1.1/Sun Jul 28 23:09:38 2002//
+/mcmc_sample_to_hist.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/mk_schedule.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/mk_tetrad_data_file.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/score_dags.m/1.1.1.1/Mon Apr 4 01:39:46 2005//
+/score_dags_old.m/1.1.1.1/Wed May 29 15:59:56 2002//
+/score_family.m/1.1.1.1/Thu Jun 10 01:33:14 2004//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/learning
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/CovMat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/CovMat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function [CovMatrix, obs, varfields] = CovMat(filename,row_cols)
+%[CovMatrix, obs, varfields] = CovMat(filename,row_cols)
+%% generates a Covariance Matrix from a file of data consisting of N columns of M data rows
+%% filename string name (with path and extension) of file to open
+%% row_cols Number_of_converstions_per_row (turns into [3 inf])
+%% Return
+%% CovMatrix Covariance matrix
+%% obs Number of observations read in
+%% varfields Labels of the variables see filename structure below
+%%
+%% Filename structure:
+%% Comma separated, starting with the variable labels, then the data in rows.
+%% filename test.txt consists of:
+%%
+%% Earthquake,Burglar,Radio,Alarm,Call
+%% 1,2,3,4,5
+%% 11,22,33,44,55
+%% . . .
+%%
+%% Example call:
+%% [cvmat numdat lables] = CovMat('test.txt',5);
+%%
+%% Returns Covariance matrix, number of date rows and variable field names
+%% Gary R. Bradski 7/2002
+
+fmtstr = '%f';
+for i = 2:row_cols
+ fmtstr = strcat(fmtstr,',%f');
+end
+
+%% load data
+fidCov = fopen(filename,'r');
+
+varfields = fgetl(fidCov);
+Corx = fscanf(fidCov,fmtstr,[row_cols inf]);
+Corx= Corx';
+[obs bla] = size(Corx);
+CovMatrix = cov(Corx);
+fclose(fidCov);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/bayes_update_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/bayes_update_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function bnet = bayes_update_params(bnet, cases, clamped)
+% BAYES_UPDATE_PARAMS Bayesian parameter updating given completely observed data
+% bnet = bayes_update_params(bnet, cases, clamped)
+%
+% If there is a missing data, you must use EM.
+% cases(i,m) is the value assigned to node i in case m (this can also be a cell array).
+% clamped(i,m) = 1 if node i was set by intervention in case m (default: clamped = zeros).
+% Clamped nodes are not updated.
+% If there is a single case, clamped is a list of the clamped nodes, not a bit vector.
+
+
+%if iscell(cases), usecell = 1; else usecell = 0; end
+
+n = length(bnet.dag);
+ncases = size(cases, 2);
+if n ~= size(cases, 1)
+ error('data must be of size nnodes * ncases');
+end
+
+if ncases == 1 % clamped is a list of nodes
+ if nargin < 3, clamped = []; end
+ clamp_set = clamped;
+ clamped = zeros(n,1);
+ clamped(clamp_set) = 1;
+else % each row of clamped is a bit vector
+ if nargin < 3, clamped = zeros(n,ncases); end
+end
+
+for i=1:n
+ e = bnet.equiv_class(i);
+ if adjustable_CPD(bnet.CPD{e})
+ u = find(clamped(i,:)==0);
+ ps = parents(bnet.dag, i);
+ bnet.CPD{e} = bayes_update_params(bnet.CPD{e}, cases(i,u), cases(ps,u));
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/bic_score_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/bic_score_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function [S, LL] = bic_score(counts, CPT, ncases)
+% BIC_SCORE Bayesian Information Criterion score for a single family
+% [S, LL] = bic_score(counts, CPT, ncases)
+%
+% S is a large sample approximation to the log marginal likelihood,
+% which can be computed using dirichlet_score.
+%
+% S = \log [ prod_j _prod_k theta_ijk ^ N_ijk ] - 0.5*d*log(ncases)
+% where counts encode N_ijk, theta_ijk is the MLE comptued from counts,
+% and d is the num of free parameters.
+
+%CPT = mk_stochastic(counts);
+tiny = exp(-700);
+LL = sum(log(CPT(:) + tiny) .* counts(:));
+% CPT(i) = 0 iff counts(i) = 0 so it is okay to add tiny
+
+ns = mysize(counts);
+ns_ps = ns(1:end-1);
+ns_self = ns(end);
+nparams = prod([ns_ps (ns_self-1)]);
+% sum-to-1 constraint reduces the effective num. vals of the node by 1
+
+S = LL - 0.5*nparams*log(ncases);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/compute_cooling_schedule.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/compute_cooling_schedule.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function temp_schedule = compute_cooling_schedule(init_temp, final_temp, anneal_rate)
+
+temp_schedule = [];
+i = 1;
+temp_schedule(i)=init_temp;
+while temp_schedule(i) > final_temp
+ i = i + 1;
+ temp_schedule(i)=temp_schedule(i-1)*anneal_rate;
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/dirichlet_score_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/dirichlet_score_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+function LL = dirichlet_score_family(counts, prior)
+% DIRICHLET_SCORE Compute the log marginal likelihood of a single family
+% LL = dirichlet_score(counts, prior)
+%
+% counts(a, b, ..., z) is the number of times parent 1 = a, parent 2 = b, ..., child = z
+% prior is an optional multidimensional array of the same shape as counts.
+% It defaults to a uniform prior.
+%
+% We marginalize out the parameters:
+% LL = log \int \prod_m P(x(i,m) | x(Pa_i,m), theta_i) P(theta_i) d(theta_i)
+
+
+% LL = log[ prod_j gamma(alpha_ij)/gamma(alpha_ij + N_ij) *
+% prod_k gamma(alpha_ijk + N_ijk)/gamma(alpha_ijk) ]
+% Call the prod_k term U and the prod_j term V.
+% We reshape all quantities into (j,k) matrices
+% This formula was first derived by Cooper and Herskovits, 1992.
+% See also "Learning Bayesian Networks", Heckerman, Geiger and Chickering, MLJ 95.
+
+ns = mysize(counts);
+ns_ps = ns(1:end-1);
+ns_self = ns(end);
+
+if nargin < 2, prior = normalise(myones(ns)); end
+
+
+if 1
+ prior = reshape(prior(:), [prod(ns_ps) ns_self]);
+ counts = reshape(counts, [prod(ns_ps) ns_self]);
+ %U = prod(gamma(prior + counts) ./ gamma(prior), 2); % mult over k
+ LU = sum(gammaln(prior + counts) - gammaln(prior), 2);
+ alpha_ij = sum(prior, 2); % sum over k
+ N_ij = sum(counts, 2);
+ %V = gamma(alpha_ij) ./ gamma(alpha_ij + N_ij);
+ LV = gammaln(alpha_ij) - gammaln(alpha_ij + N_ij);
+ %L = prod(U .* V);
+ LL = sum(LU + LV);
+else
+ CPT = mk_stochastic(prior + counts);
+ LL = sum(log(CPT(:) .* counts(:)));
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/kpm_learn_struct_mcmc.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/kpm_learn_struct_mcmc.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,265 @@
+function [sampled_graphs, accept_ratio, num_edges] = learn_struct_mcmc(data, ns, varargin)
+% LEARN_STRUCT_MCMC Monte Carla Markov Chain search over DAGs assuming fully observed data
+% [sampled_graphs, accept_ratio, num_edges] = learn_struct_mcmc(data, ns, ...)
+%
+% data(i,m) is the value of node i in case m.
+% ns(i) is the number of discrete values node i can take on.
+%
+% sampled_graphs{m} is the m'th sampled graph.
+% accept_ratio(t) = acceptance ratio at iteration t
+% num_edges(t) = number of edges in model at iteration t
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% scoring_fn - 'bayesian' or 'bic' [ 'bayesian' ]
+% Currently, only networks with all tabular nodes support Bayesian scoring.
+% type - type{i} is the type of CPD to use for node i, where the type is a string
+% of the form 'tabular', 'noisy_or', 'gaussian', etc. [ all cells contain 'tabular' ]
+% params - params{i} contains optional arguments passed to the CPD constructor for node i,
+% or [] if none. [ all cells contain {'prior', 1}, meaning use uniform Dirichlet priors ]
+% discrete - the list of discrete nodes [ 1:N ]
+% clamped - clamped(i,m) = 1 if node i is clamped in case m [ zeros(N, ncases) ]
+% nsamples - number of samples to draw from the chain after burn-in [ 100*N ]
+% burnin - number of steps to take before drawing samples [ 5*N ]
+% init_dag - starting point for the search [ zeros(N,N) ]
+%
+% e.g., samples = learn_struct_mcmc(data, ns, 'nsamples', 1000);
+%
+% This interface is not backwards compatible with BNT2,
+% but is designed to be compatible with the other learn_struct_xxx routines.
+%
+% Note: We currently assume a uniform structural prior.
+
+[n ncases] = size(data);
+
+
+% set default params
+type = cell(1,n);
+params = cell(1,n);
+for i=1:n
+ type{i} = 'tabular';
+ %params{i} = { 'prior', 1 };
+ params{i} = { 'prior_type', 'dirichlet', 'dirichlet_weight', 1 };
+end
+scoring_fn = 'bayesian';
+discrete = 1:n;
+clamped = zeros(n, ncases);
+nsamples = 100*n;
+burnin = 5*n;
+dag = zeros(n);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'nsamples', nsamples = args{i+1};
+ case 'burnin', burnin = args{i+1};
+ case 'init_dag', dag = args{i+1};
+ case 'scoring_fn', scoring_fn = args{i+1};
+ case 'type', type = args{i+1};
+ case 'discrete', discrete = args{i+1};
+ case 'clamped', clamped = args{i+1};
+ case 'params', if isempty(args{i+1}), params = cell(1,n); else params = args{i+1}; end
+ end
+end
+
+% We implement the fast acyclicity check described by P. Giudici and R. Castelo,
+% "Improving MCMC model search for data mining", submitted to J. Machine Learning, 2001.
+use_giudici = 1;
+if use_giudici
+ [nbrs, ops, nodes] = mk_nbrs_of_digraph(dag);
+ A = init_ancestor_matrix(dag);
+else
+ [nbrs, ops, nodes] = mk_nbrs_of_dag(dag);
+ A = [];
+end
+
+num_accepts = 1;
+num_rejects = 1;
+T = burnin + nsamples;
+accept_ratio = zeros(1, T);
+num_edges = zeros(1, T);
+sampled_graphs = cell(1, nsamples);
+%sampled_bitv = zeros(nsamples, n^2);
+
+for t=1:T
+ [dag, nbrs, ops, nodes, A, accept] = take_step(dag, nbrs, ops, nodes, ns, data, clamped, A, ...
+ scoring_fn, discrete, type, params);
+ num_edges(t) = sum(dag(:));
+ num_accepts = num_accepts + accept;
+ num_rejects = num_rejects + (1-accept);
+ accept_ratio(t) = num_accepts/num_rejects;
+ if t > burnin
+ sampled_graphs{t-burnin} = dag;
+ %sampled_bitv(t-burnin, :) = dag(:)';
+ end
+end
+
+
+%%%%%%%%%
+
+
+function [new_dag, new_nbrs, new_ops, new_nodes, A, accept] = ...
+ take_step(dag, nbrs, ops, nodes, ns, data, clamped, A, ...
+ scoring_fn, discrete, type, params)
+
+
+use_giudici = ~isempty(A);
+if use_giudici
+ [new_dag, op, i, j] = pick_digraph_nbr(dag, nbrs, ops, nodes, A);
+ %assert(acyclic(new_dag));
+ [new_nbrs, new_ops, new_nodes] = mk_nbrs_of_digraph(new_dag);
+else
+ d = sample_discrete(normalise(ones(1, length(nbrs))));
+ new_dag = nbrs{d};
+ op = ops{d};
+ i = nodes(d, 1); j = nodes(d, 2);
+ [new_nbrs, new_ops, new_nodes] = mk_nbrs_of_dag(new_dag);
+end
+
+bf = bayes_factor(dag, new_dag, op, i, j, ns, data, clamped, scoring_fn, discrete, type, params);
+
+%R = bf * (new_prior / prior) * (length(nbrs) / length(new_nbrs));
+R = bf * (length(nbrs) / length(new_nbrs));
+u = rand(1,1);
+if u > min(1,R) % reject the move
+ accept = 0;
+ new_dag = dag;
+ new_nbrs = nbrs;
+ new_ops = ops;
+ new_nodes = nodes;
+else
+ accept = 1;
+ if use_giudici
+ A = update_ancestor_matrix(A, op, i, j, new_dag);
+ end
+end
+
+
+%%%%%%%%%
+
+function bfactor = bayes_factor(old_dag, new_dag, op, i, j, ns, data, clamped, scoring_fn, discrete, type, params)
+
+u = find(clamped(j,:)==0);
+LLnew = score_family(j, parents(new_dag, j), type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+LLold = score_family(j, parents(old_dag, j), type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+bf1 = exp(LLnew - LLold);
+
+if strcmp(op, 'rev') % must also multiply in the changes to i's family
+ u = find(clamped(i,:)==0);
+ LLnew = score_family(i, parents(new_dag, i), type{i}, scoring_fn, ns, discrete, data(:,u), params{i});
+ LLold = score_family(i, parents(old_dag, i), type{i}, scoring_fn, ns, discrete, data(:,u), params{i});
+ bf2 = exp(LLnew - LLold);
+else
+ bf2 = 1;
+end
+bfactor = bf1 * bf2;
+
+
+%%%%%%%% Giudici stuff follows %%%%%%%%%%
+
+
+function [new_dag, op, i, j] = pick_digraph_nbr(dag, digraph_nbrs, ops, nodes, A)
+
+legal = 0;
+while ~legal
+ d = sample_discrete(normalise(ones(1, length(digraph_nbrs))));
+ i = nodes(d, 1); j = nodes(d, 2);
+ switch ops{d}
+ case 'add',
+ if A(i,j)==0
+ legal = 1;
+ end
+ case 'del',
+ legal = 1;
+ case 'rev',
+ ps = mysetdiff(parents(dag, j), i);
+ % if any(A(ps,i)) then there is a path i -> parent of j -> j
+ % so reversing i->j would create a cycle
+ legal = ~any(A(ps, i));
+ end
+end
+%new_dag = digraph_nbrs{d};
+new_dag = digraph_nbrs(:,:,d);
+op = ops{d};
+i = nodes(d, 1); j = nodes(d, 2);
+
+
+%%%%%%%%%%%%%%
+
+
+function A = update_ancestor_matrix(A, op, i, j, dag)
+
+switch op
+ case 'add',
+ A = do_addition(A, op, i, j, dag);
+ case 'del',
+ A = do_removal(A, op, i, j, dag);
+ case 'rev',
+ A = do_removal(A, op, i, j, dag);
+ A = do_addition(A, op, j, i, dag);
+end
+
+
+%%%%%%%%%%%%
+
+function A = do_addition(A, op, i, j, dag)
+
+A(j,i) = 1; % i is an ancestor of j
+anci = find(A(i,:));
+if ~isempty(anci)
+ A(j,anci) = 1; % all of i's ancestors are added to Anc(j)
+end
+ancj = find(A(j,:));
+descj = find(A(:,j));
+if ~isempty(ancj)
+ for k=descj(:)'
+ A(k,ancj) = 1; % all of j's ancestors are added to each descendant of j
+ end
+end
+
+%%%%%%%%%%%
+
+function A = do_removal(A, op, i, j, dag)
+
+% find all the descendants of j, and put them in topological order
+%descj = find(A(:,j));
+R = reachability_graph(dag);
+descj = find(R(j,:));
+order = topological_sort(dag);
+descj_topnum = order(descj);
+[junk, perm] = sort(descj_topnum);
+descj = descj(perm);
+% Update j and all its descendants
+A = update_row(A, j, dag);
+for k = descj(:)'
+ A = update_row(A, k, dag);
+end
+
+%%%%%%%%%
+
+function A = update_row(A, j, dag)
+
+% We compute row j of A
+A(j, :) = 0;
+ps = parents(dag, j);
+if ~isempty(ps)
+ A(j, ps) = 1;
+end
+for k=ps(:)'
+ anck = find(A(k,:));
+ if ~isempty(anck)
+ A(j, anck) = 1;
+ end
+end
+
+%%%%%%%%
+
+function A = init_ancestor_matrix(dag)
+
+order = topological_sort(dag);
+A = zeros(length(dag));
+for j=order(:)'
+ A = update_row(A, j, dag);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_params.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_params.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function bnet = learn_params(bnet, data)
+% LEARN_PARAMS Find the maximum likelihood params for a fully observed model
+% bnet = learn_params(bnet, data)
+%
+% data(i,m) is the value of node i in case m (can be a cell array)
+%
+% We set bnet.CPD{i} to its ML/MAP estimate.
+%
+% Currently we assume no param tying
+
+% AND THAT EACH DATA POINT IS A SCALAR - no longer assumed
+
+%if iscell(data)
+% data=cell2num(data);
+%end
+[n ncases] = size(data);
+for j=1:n
+ e = bnet.equiv_class(j);
+ assert(e==j);
+ if adjustable_CPD(bnet.CPD{e})
+ fam = family(bnet.dag,j);
+ %bnet.CPD{j} = learn_params(bnet.CPD{j}, data(fam,:));
+ bnet.CPD{j} = learn_params(bnet.CPD{j}, fam, data, bnet.node_sizes, bnet.cnodes);
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_params_dbn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_params_dbn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function bnet = learn_params_dbn(bnet, data)
+% LEARN_PARAM_DBN Estimate params of a DBN for a fully observed model
+% bnet = learn_params_dbn(bnet, data)
+%
+% data(i,t) is the value of node i in slice t (can be a cell array)
+% We currently assume there is a single time series
+%
+% We set bnet.CPD{i} to its ML/MAP estimate.
+%
+% Currently we assume each node in the first 2 slices has its own CPD (no param tying);
+% all nodes in slices >2 share their params with slice 2 as usual.
+
+[ss T] = size(data);
+
+% slice 1
+for j=1:ss
+ if adjustable_CPD(bnet.CPD{j})
+ fam = family(bnet.dag,j);
+ bnet.CPD{j} = learn_params(bnet.CPD{j}, data(fam,1));
+ end
+end
+
+
+% slices 2:T
+% data2(:,t) contains [data(:,t-1); data(:,t)].
+% Then we extract out the rows corresponding to the parents in the current and previous slice.
+data2 = [data(:,1:T-1);
+ data(:,2:T)];
+for j=1:ss
+ j2 = j+ss;
+ if adjustable_CPD(bnet.CPD{j2})
+ fam = family(bnet.dag,j2);
+ bnet.CPD{j2} = learn_params(bnet.CPD{j2}, data2(fam,:));
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_params_dbn_em.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_params_dbn_em.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,179 @@
+function [bnet, LL, engine] = learn_params_dbn_em(engine, evidence, varargin)
+% LEARN_PARAMS_DBN_EM Set the parameters in a DBN to their ML/MAP values using batch EM.
+% [bnet, LLtrace, engine] = learn_params_dbn_em(engine, data, ...)
+%
+% data{l}{i,t} = value of node i in slice t of time-series l, or [] if hidden.
+% Suppose you have L time series, each of length T, in an O*T*L array D,
+% where O is the num of observed scalar nodes, and N is the total num nodes per slice.
+% Then you can create data as follows, where onodes is the index of the observable nodes:
+% data = cell(1,L);
+% for l=1:L
+% data{l} = cell(N, T);
+% data{l}(onodes,:) = num2cell(D(:,:,l));
+% end
+% Of course it is possible for different sets of nodes to be observed in
+% each slice/ sequence, and for each sequence to be a different length.
+%
+% LLtrace is the learning curve: the vector of log-likelihood scores at each iteration.
+%
+% Optional arguments [default]
+%
+% max_iter - specifies the maximum number of iterations [100]
+% thresh - specifies the threshold for stopping EM [1e-3]
+% We stop when |f(t) - f(t-1)| / avg < threshold,
+% where avg = (|f(t)| + |f(t-1)|)/2 and f is log lik.
+% verbose - display loglik at each iteration [1]
+% anneal - 1 means do deterministic annealing (only for entropic priors) [0]
+% anneal_rate - geometric cooling rate [0.8]
+% init_temp - initial annealing temperature [10]
+% final_temp - final annealing temperature [1e-3]
+%
+
+max_iter = 100;
+thresh = 1e-3;
+anneal = 0;
+anneal_rate = 0.8;
+init_temp = 10;
+final_temp = 1e-3;
+verbose = 1;
+
+for i=1:2:length(varargin)
+ switch varargin{i}
+ case 'max_iter', max_iter = varargin{i+1};
+ case 'thresh', thresh = varargin{i+1};
+ case 'anneal', anneal = varargin{i+1};
+ case 'anneal_rate', anneal_rate = varargin{i+1};
+ case 'init_temp', init_temp = varargin{i+1};
+ case 'final_temp', final_temp = varargin{i+1};
+ otherwise, error(['unrecognized argument' varargin{i}])
+ end
+end
+
+% take 1 EM step at each temperature value, then when temp=0, run to convergence
+% When using an entropic prior, Z = 1-T, so
+% T=2 => Z=-1 (max entropy)
+% T=1 => Z=0 (max likelihood)
+% T=0 => Z=1 (min entropy / max structure)
+num_iter = 1;
+LL = [];
+if anneal
+ temperature = init_temp;
+ while temperature > final_temp
+ [engine, loglik, logpost] = EM_step(engine, evidence, temperature);
+ if verbose
+ fprintf('EM iteration %d, loglik = %8.4f, logpost = %8.4f, temp=%8.4f\n', ...
+ num_iter, loglik, logpost, temperature);
+ end
+ num_iter = num_iter + 1;
+ LL = [LL loglik];
+ temperature = temperature * anneal_rate;
+ end
+ temperature = 0;
+ previous_loglik = loglik;
+ previous_logpost = logpost;
+else
+ temperature = 0;
+ previous_loglik = -inf;
+ previous_logpost = -inf;
+end
+
+converged = 0;
+while ~converged & (num_iter <= max_iter)
+ [engine, loglik, logpost] = EM_step(engine, evidence, temperature);
+ if verbose
+ %fprintf('EM iteration %d, loglik = %8.4f, logpost = %8.4f\n', ...
+ % num_iter, loglik, logpost);
+ fprintf('EM iteration %d, loglik = %8.4f\n', num_iter, loglik);
+ end
+ num_iter = num_iter + 1;
+ [converged, decreased] = em_converged(loglik, previous_loglik, thresh);
+ %[converged, decreased] = em_converged(logpost, previous_logpost, thresh);
+ previous_loglik = loglik;
+ previous_logpost = logpost;
+ LL = [LL loglik];
+end
+
+bnet = bnet_from_engine(engine);
+
+%%%%%%%%%
+
+function [engine, loglik, logpost] = EM_step(engine, cases, temp)
+
+bnet = bnet_from_engine(engine); % engine contains the old params that are used for the E step
+ss = length(bnet.intra);
+CPDs = bnet.CPD; % these are the new params that get maximized
+num_CPDs = length(CPDs);
+
+% log P(theta|D) = (log P(D|theta) + log P(theta)) - log(P(D))
+% where log P(D|theta) = sum_cases log P(case|theta)
+% and log P(theta) = sum_CPDs log P(CPD) - only count once even if tied!
+% logpost = log P(theta,D) (un-normalized)
+% This should be negative, and increase at every step.
+
+adjustable = zeros(1,num_CPDs);
+logprior = zeros(1, num_CPDs);
+for e=1:num_CPDs
+ adjustable(e) = adjustable_CPD(CPDs{e});
+end
+adj = find(adjustable);
+
+for e=adj(:)'
+ logprior(e) = log_prior(CPDs{e});
+ CPDs{e} = reset_ess(CPDs{e});
+end
+
+loglik = 0;
+for l=1:length(cases)
+ evidence = cases{l};
+ if ~iscell(evidence)
+ error('training data must be a cell array of cell arrays')
+ end
+ [engine, ll] = enter_evidence(engine, evidence);
+ assert(~isnan(ll))
+ loglik = loglik + ll;
+ T = size(evidence, 2);
+
+ % We unroll ns etc because in update_ess, we refer to nodes by their unrolled number
+ % so that they extract evidence from the right place.
+ % (The CPD should really store its own version of ns and cnodes...)
+ ns = repmat(bnet.node_sizes_slice(:), [1 T]);
+ cnodes = unroll_set(bnet.cnodes_slice, ss, T);
+
+ %hidden_bitv = repmat(bnet.hidden_bitv(1:ss), [1 T]);
+ hidden_bitv = zeros(ss, T);
+ hidden_bitv(isemptycell(evidence))=1;
+ % hidden_bitv(i) = 1 means node i is hidden.
+ % We pass this in, rather than using isemptycell(evidence(dom)), because
+ % isemptycell is very slow.
+
+ t = 1;
+ for i=1:ss
+ e = bnet.equiv_class(i,1);
+ if adjustable(e)
+ fmarg = marginal_family(engine, i, t);
+ CPDs{e} = update_ess(CPDs{e}, fmarg, evidence, ns(:), cnodes(:), hidden_bitv(:));
+ end
+ end
+
+ for i=1:ss
+ e = bnet.equiv_class(i,2);
+ if adjustable(e)
+ for t=2:T
+ fmarg = marginal_family(engine, i, t);
+ CPDs{e} = update_ess(CPDs{e}, fmarg, evidence, ns(:), cnodes(:), hidden_bitv(:));
+ end
+ end
+ end
+end
+
+logpost = loglik + sum(logprior(:));
+
+for e=adj(:)'
+ CPDs{e} = maximize_params(CPDs{e}, temp);
+end
+
+engine = update_engine(engine, CPDs);
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_params_em.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_params_em.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+function [bnet, LL, engine] = learn_params_em(engine, evidence, max_iter, thresh)
+% LEARN_PARAMS_EM Set the parameters of each adjustable node to their ML/MAP values using batch EM.
+% [bnet, LLtrace, engine] = learn_params_em(engine, data, max_iter, thresh)
+%
+% data{i,l} is the value of node i in case l, or [] if hidden.
+% Suppose you have L training cases in an O*L array, D, where O is the num observed
+% scalar nodes, and N is the total num nodes.
+% Then you can create 'data' as follows, where onodes is the index of the observable nodes:
+% data = cell(N, L);
+% data(onodes,:) = num2cell(D);
+% Of course it is possible for different sets of nodes to be observed in each case.
+%
+% We return the modified bnet and engine.
+% To see the learned parameters for node i, use the construct
+% s = struct(bnet.CPD{i}); % violate object privacy
+% LLtrace is the learning curve: the vector of log-likelihood scores at each iteration.
+%
+% max_iter specifies the maximum number of iterations. Default: 10.
+%
+% thresh specifies the threshold for stopping EM. Default: 1e-3.
+% We stop when |f(t) - f(t-1)| / avg < threshold,
+% where avg = (|f(t)| + |f(t-1)|)/2 and f is log lik.
+
+if nargin < 3, max_iter = 10; end
+if nargin < 4, thresh = 1e-3; end
+
+verbose = 1;
+
+loglik = 0;
+previous_loglik = -inf;
+converged = 0;
+num_iter = 1;
+LL = [];
+
+while ~converged & (num_iter <= max_iter)
+ [engine, loglik] = EM_step(engine, evidence);
+ if verbose, fprintf('EM iteration %d, ll = %8.4f\n', num_iter, loglik); end
+ num_iter = num_iter + 1;
+ converged = em_converged(loglik, previous_loglik, thresh);
+ previous_loglik = loglik;
+ LL = [LL loglik];
+end
+if verbose, fprintf('\n'); end
+
+bnet = bnet_from_engine(engine);
+
+%%%%%%%%%
+
+function [engine, loglik] = EM_step(engine, cases)
+
+bnet = bnet_from_engine(engine); % engine contains the old params that are used for the E step
+CPDs = bnet.CPD; % these are the new params that get maximized
+num_CPDs = length(CPDs);
+adjustable = zeros(1,num_CPDs);
+for e=1:num_CPDs
+ adjustable(e) = adjustable_CPD(CPDs{e});
+end
+adj = find(adjustable);
+n = length(bnet.dag);
+
+for e=adj(:)'
+ CPDs{e} = reset_ess(CPDs{e});
+end
+
+loglik = 0;
+ncases = size(cases, 2);
+for l=1:ncases
+ evidence = cases(:,l);
+ [engine, ll] = enter_evidence(engine, evidence);
+ loglik = loglik + ll;
+ hidden_bitv = zeros(1,n);
+ hidden_bitv(isemptycell(evidence))=1;
+ for i=1:n
+ e = bnet.equiv_class(i);
+ if adjustable(e)
+ fmarg = marginal_family(engine, i);
+ CPDs{e} = update_ess(CPDs{e}, fmarg, evidence, bnet.node_sizes, bnet.cnodes, hidden_bitv);
+ end
+ end
+end
+
+for e=adj(:)'
+ CPDs{e} = maximize_params(CPDs{e});
+end
+
+engine = update_engine(engine, CPDs);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_K2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_K2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+function dag = learn_struct_K2(data, ns, order, varargin)
+% LEARN_STRUCT_K2 Greedily learn the best structure compatible with a fixed node ordering
+% best_dag = learn_struct_K2(data, node_sizes, order, ...)
+%
+% data(i,m) = value of node i in case m (can be a cell array).
+% node_sizes(i) is the size of node i.
+% order(i) is the i'th node in the topological ordering.
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% max_fan_in - this is the largest number of parents we allow per node [N]
+% scoring_fn - 'bayesian' or 'bic' [ 'bayesian' ]
+% Currently, only networks with all tabular nodes support Bayesian scoring.
+% type - type{i} is the type of CPD to use for node i, where the type is a string
+% of the form 'tabular', 'noisy_or', 'gaussian', etc. [ all cells contain 'tabular' ]
+% params - params{i} contains optional arguments passed to the CPD constructor for node i,
+% or [] if none. [ all cells contain {'prior', 1}, meaning use uniform Dirichlet priors ]
+% discrete - the list of discrete nodes [ 1:N ]
+% clamped - clamped(i,m) = 1 if node i is clamped in case m [ zeros(N, ncases) ]
+% verbose - 'yes' means display output while running [ 'no' ]
+%
+% e.g., dag = learn_struct_K2(data, ns, order, 'scoring_fn', 'bic', 'params', [])
+%
+% To be backwards compatible with BNT2, you can also specify arguments as follows
+% dag = learn_struct_K2(data, node_sizes, order, max_fan_in)
+%
+% This algorithm is described in
+% - Cooper and Herskovits, "A Bayesian method for the induction of probabilistic
+% networks from data", Machine Learning Journal 9:308--347, 1992
+
+[n ncases] = size(data);
+
+% set default params
+type = cell(1,n);
+params = cell(1,n);
+for i=1:n
+ type{i} = 'tabular';
+ %params{i} = { 'prior', 1 };
+ params{i} = { 'prior_type', 'dirichlet', 'dirichlet_weight', 1 };
+end
+scoring_fn = 'bayesian';
+discrete = 1:n;
+clamped = zeros(n, ncases);
+
+max_fan_in = n;
+verbose = 0;
+
+args = varargin;
+nargs = length(args);
+if length(args) > 0
+ if isstr(args{1})
+ for i=1:2:nargs
+ switch args{i},
+ case 'verbose', verbose = strcmp(args{i+1}, 'yes');
+ case 'max_fan_in', max_fan_in = args{i+1};
+ case 'scoring_fn', scoring_fn = args{i+1};
+ case 'type', type = args{i+1};
+ case 'discrete', discrete = args{i+1};
+ case 'clamped', clamped = args{i+1};
+ case 'params', if isempty(args{i+1}), params = cell(1,n); else params = args{i+1}; end
+ end
+ end
+ else
+ max_fan_in = args{1};
+ end
+end
+
+dag = zeros(n,n);
+
+for i=1:n
+ ps = [];
+ j = order(i);
+ u = find(clamped(j,:)==0);
+ score = score_family(j, ps, type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+ if verbose, fprintf('\nnode %d, empty score %6.4f\n', j, score); end
+ done = 0;
+ while ~done & (length(ps) <= max_fan_in)
+ pps = mysetdiff(order(1:i-1), ps); % potential parents
+ nps = length(pps);
+ pscore = zeros(1, nps);
+ for pi=1:nps
+ p = pps(pi);
+ pscore(pi) = score_family(j, [ps p], type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+ if verbose, fprintf('considering adding %d to %d, score %6.4f\n', p, j, pscore(pi)); end
+ end
+ [best_pscore, best_p] = max(pscore);
+ best_p = pps(best_p);
+ if best_pscore > score
+ score = best_pscore;
+ ps = [ps best_p];
+ if verbose, fprintf('* adding %d to %d, score %6.4f\n', best_p, j, best_pscore); end
+ else
+ done = 1;
+ end
+ end
+ if ~isempty(ps) % need this check for matlab 5.2
+ dag(ps, j) = 1;
+ end
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_dbn_reveal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_dbn_reveal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,101 @@
+function inter = learn_struct_dbn_reveal(seqs, ns, max_fan_in, penalty)
+% LEARN_STRUCT_DBN_REVEAL Learn inter-slice adjacency matrix given fully observable discrete time series
+% inter = learn_struct_dbn_reveal(seqs, node_sizes, max_fan_in, penalty)
+%
+% seqs{l}{i,t} = value of node i in slice t of time-series l.
+% If you have a single time series in an N*T array D, use
+% seqs = { num2cell(D) }.
+% If you have L time series, each of length T, in an N*T*L array D, use
+% seqs= cell(1,L); for l=1:L, seqs{l} = num2cell(D(:,:,l)); end
+% or, in vectorized form,
+% seqs = squeeze(num2cell(num2cell(D),[1 2]));
+% Currently the data is assumed to be discrete (1,2,...)
+%
+% node_sizes(i) is the number of possible values for node i
+% max_fan_in is the largest number of parents we allow per node (default: N)
+% penalty is weight given to the complexity penalty (default: 0.5)
+% A penalty of 0.5 gives the BIC score.
+% A penalty of 0 gives the ML score.
+% Maximizing likelihood is equivalent to maximizing mutual information between parents and child.
+%
+% inter(i,j) = 1 iff node i in slice t connects to node j in slice t+1
+%
+% The parent set for each node in slice 2 is computed by evaluating all subsets of nodes in slice 1,
+% and picking the largest scoring one. This takes O(n^k) time per node, where n is the num. nodes
+% per slice, and k <= n is the max fan in.
+% Since all the nodes are observed, we do not need to use an inference engine.
+% And since we are only learning the inter-slice matrix, we do not need to check for cycles.
+%
+% This algorithm is described in
+% - "REVEAL: A general reverse engineering algorithm for inference of genetic network
+% architectures", Liang et al. PSB 1998
+% - "Extended dependency analysis of large systems",
+% Roger Conant, Intl. J. General Systems, 1988, vol 14, pp 97-141
+% - "Learning the structure of DBNs", Friedman, Murphy and Russell, UAI 1998.
+
+n = length(ns);
+
+if nargin < 3, max_fan_in = n; end
+if nargin < 4, penalty = 0.5; end
+
+inter = zeros(n,n);
+
+if ~iscell(seqs)
+ data{1} = seqs;
+end
+
+nseq = length(seqs);
+nslices = 0;
+data = cell(1, nseq);
+for l=1:nseq
+ nslices = nslices + size(seqs{l}, 2);
+ data{l} = cell2num(seqs{l})'; % each row is a case
+end
+ndata = nslices - nseq; % subtract off the initial slice of each sequence
+
+% We concatenate the sequences as in the following example.
+% Let there be 2 sequences of lengths 4 and 5, with n nodes per slice,
+% and let i be the target node.
+% Then we construct following matrix D
+%
+% s{1}{1,1} ... s{1}{1,3} s{2}{1,1} ... s{2}{1,4}
+% ....
+% s{1}{n,1} ... s{1}{n,3} s{2}{n,1} ... s{2}{n,4}
+% s{1}{i,2} ... s{1}{i,4} s{2}{i,2} ... s{2}{i,5}
+%
+% D(1:n, i) is the i'th input and D(n+1, i) is the i'th output.
+%
+% We concatenate each sequence separately to avoid treating the transition
+% from the end of one sequence to the beginning of another as a "normal" transition.
+
+
+for i=1:n
+ D = [];
+ for l=1:nseq
+ T = size(seqs{l}, 2);
+ A = cell2num(seqs{l}(:, 1:T-1));
+ B = cell2num(seqs{l}(i, 2:T));
+ C = [A;B];
+ D = [D C];
+ end
+ SS = subsets(1:n, max_fan_in, 1); % skip the empty set
+ nSS = length(SS);
+ bic_score = zeros(1, nSS);
+ ll_score = zeros(1, nSS);
+ target = n+1;
+ ns2 = [ns ns(i)];
+ for h=1:nSS
+ ps = SS{h};
+ dom = [ps target];
+ counts = compute_counts(D(dom, :), ns2(dom));
+ CPT = mk_stochastic(counts);
+ [bic_score(h), ll_score(h)] = bic_score_family(counts, CPT, ndata);
+ end
+ if penalty == 0
+ h = argmax(ll_score);
+ else
+ h = argmax(bic_score);
+ end
+ ps = SS{h};
+ inter(ps, i) = 1;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_mcmc.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_mcmc.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,284 @@
+function [sampled_graphs, accept_ratio, num_edges] = learn_struct_mcmc(data, ns, varargin)
+% LEARN_STRUCT_MCMC Markov chain Monte Carlo search over DAGs assuming fully observed data
+% [sampled_graphs, accept_ratio, num_edges] = learn_struct_mcmc(data, ns, ...)
+%
+% data(i,m) is the value of node i in case m.
+% ns(i) is the number of discrete values node i can take on.
+%
+% sampled_graphs{m} is the m'th sampled graph.
+% accept_ratio(t) = acceptance ratio at iteration t
+% num_edges(t) = number of edges in model at iteration t
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% scoring_fn - 'bayesian' or 'bic' [ 'bayesian' ]
+% Currently, only networks with all tabular nodes support Bayesian scoring.
+% type - type{i} is the type of CPD to use for node i, where the type is a string
+% of the form 'tabular', 'noisy_or', 'gaussian', etc. [ all cells contain 'tabular' ]
+% params - params{i} contains optional arguments passed to the CPD constructor for node i,
+% or [] if none. [ all cells contain {'prior', 1}, meaning use uniform Dirichlet priors ]
+% discrete - the list of discrete nodes [ 1:N ]
+% clamped - clamped(i,m) = 1 if node i is clamped in case m [ zeros(N, ncases) ]
+% nsamples - number of samples to draw from the chain after burn-in [ 100*N ]
+% burnin - number of steps to take before drawing samples [ 5*N ]
+% init_dag - starting point for the search [ zeros(N,N) ]
+%
+% e.g., samples = learn_struct_mcmc(data, ns, 'nsamples', 1000);
+%
+% Modified by Sonia Leach (SML) 2/4/02, 9/5/03
+
+
+
+[n ncases] = size(data);
+
+
+% set default params
+type = cell(1,n);
+params = cell(1,n);
+for i=1:n
+ type{i} = 'tabular';
+ %params{i} = { 'prior', 1};
+ params{i} = { 'prior_type', 'dirichlet', 'dirichlet_weight', 1 };
+end
+scoring_fn = 'bayesian';
+discrete = 1:n;
+clamped = zeros(n, ncases);
+nsamples = 100*n;
+burnin = 5*n;
+dag = zeros(n);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'nsamples', nsamples = args{i+1};
+ case 'burnin', burnin = args{i+1};
+ case 'init_dag', dag = args{i+1};
+ case 'scoring_fn', scoring_fn = args{i+1};
+ case 'type', type = args{i+1};
+ case 'discrete', discrete = args{i+1};
+ case 'clamped', clamped = args{i+1};
+ case 'params', if isempty(args{i+1}), params = cell(1,n); else params = args{i+1}; end
+ end
+end
+
+% We implement the fast acyclicity check described by P. Giudici and R. Castelo,
+% "Improving MCMC model search for data mining", submitted to J. Machine Learning, 2001.
+
+% SML: also keep descendant matrix C
+use_giudici = 1;
+if use_giudici
+ [nbrs, ops, nodes, A] = mk_nbrs_of_digraph(dag);
+else
+ [nbrs, ops, nodes] = mk_nbrs_of_dag(dag);
+ A = [];
+end
+
+num_accepts = 1;
+num_rejects = 1;
+T = burnin + nsamples;
+accept_ratio = zeros(1, T);
+num_edges = zeros(1, T);
+sampled_graphs = cell(1, nsamples);
+%sampled_bitv = zeros(nsamples, n^2);
+
+for t=1:T
+ [dag, nbrs, ops, nodes, A, accept] = take_step(dag, nbrs, ops, ...
+ nodes, ns, data, clamped, A, ...
+ scoring_fn, discrete, type, params);
+ num_edges(t) = sum(dag(:));
+ num_accepts = num_accepts + accept;
+ num_rejects = num_rejects + (1-accept);
+ accept_ratio(t) = num_accepts/num_rejects;
+ if t > burnin
+ sampled_graphs{t-burnin} = dag;
+ %sampled_bitv(t-burnin, :) = dag(:)';
+ end
+end
+
+
+%%%%%%%%%
+
+
+function [new_dag, new_nbrs, new_ops, new_nodes, A, accept] = ...
+ take_step(dag, nbrs, ops, nodes, ns, data, clamped, A, ...
+ scoring_fn, discrete, type, params, prior_w)
+
+
+use_giudici = ~isempty(A);
+if use_giudici
+ [new_dag, op, i, j, new_A] = pick_digraph_nbr(dag, nbrs, ops, nodes,A); % updates A
+ [new_nbrs, new_ops, new_nodes] = mk_nbrs_of_digraph(new_dag, new_A);
+else
+ d = sample_discrete(normalise(ones(1, length(nbrs))));
+ new_dag = nbrs{d};
+ op = ops{d};
+ i = nodes(d, 1); j = nodes(d, 2);
+ [new_nbrs, new_ops, new_nodes] = mk_nbrs_of_dag(new_dag);
+end
+
+bf = bayes_factor(dag, new_dag, op, i, j, ns, data, clamped, scoring_fn, discrete, type, params);
+
+%R = bf * (new_prior / prior) * (length(nbrs) / length(new_nbrs));
+R = bf * (length(nbrs) / length(new_nbrs));
+u = rand(1,1);
+if u > min(1,R) % reject the move
+ accept = 0;
+ new_dag = dag;
+ new_nbrs = nbrs;
+ new_ops = ops;
+ new_nodes = nodes;
+else
+ accept = 1;
+ if use_giudici
+A = new_A; % new_A already updated in pick_digraph_nbr
+ end
+end
+
+
+%%%%%%%%%
+
+function bfactor = bayes_factor(old_dag, new_dag, op, i, j, ns, data, clamped, scoring_fn, discrete, type, params)
+
+u = find(clamped(j,:)==0);
+LLnew = score_family(j, parents(new_dag, j), type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+LLold = score_family(j, parents(old_dag, j), type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+bf1 = exp(LLnew - LLold);
+
+if strcmp(op, 'rev') % must also multiply in the changes to i's family
+ u = find(clamped(i,:)==0);
+ LLnew = score_family(i, parents(new_dag, i), type{i}, scoring_fn, ns, discrete, data(:,u), params{i});
+ LLold = score_family(i, parents(old_dag, i), type{i}, scoring_fn, ns, discrete, data(:,u), params{i});
+ bf2 = exp(LLnew - LLold);
+else
+ bf2 = 1;
+end
+bfactor = bf1 * bf2;
+
+
+%%%%%%%% Giudici stuff follows %%%%%%%%%%
+
+
+% SML: This now updates A as it goes from digraph it choses
+function [new_dag, op, i, j, new_A] = pick_digraph_nbr(dag, digraph_nbrs, ops, nodes, A)
+
+d = sample_discrete(normalise(ones(1, length(digraph_nbrs))));
+%d = myunidrnd(length(digraph_nbrs),1,1);
+i = nodes(d, 1); j = nodes(d, 2);
+new_dag = digraph_nbrs(:,:,d);
+op = ops{d};
+new_A = update_ancestor_matrix(A, op, i, j, new_dag);
+
+
+%%%%%%%%%%%%%%
+
+
+function A = update_ancestor_matrix(A, op, i, j, dag)
+
+switch op
+case 'add',
+ A = do_addition(A, op, i, j, dag);
+case 'del',
+ A = do_removal(A, op, i, j, dag);
+case 'rev',
+ A = do_removal(A, op, i, j, dag);
+ A = do_addition(A, op, j, i, dag);
+end
+
+
+%%%%%%%%%%%%
+
+function A = do_addition(A, op, i, j, dag)
+
+A(j,i) = 1; % i is an ancestor of j
+anci = find(A(i,:));
+if ~isempty(anci)
+ A(j,anci) = 1; % all of i's ancestors are added to Anc(j)
+end
+ancj = find(A(j,:));
+descj = find(A(:,j));
+if ~isempty(ancj)
+ for k=descj(:)'
+ A(k,ancj) = 1; % all of j's ancestors are added to each descendant of j
+ end
+end
+
+%%%%%%%%%%%
+function A = do_removal(A, op, i, j, dag)
+
+% find all the descendants of j, and put them in topological order
+
+% SML: originally Kevin had the next line commented and the %* lines
+% being used but I think this is equivalent and much less expensive
+% I assume he put it there for debugging and never changed it back...?
+descj = find(A(:,j));
+%* R = reachability_graph(dag);
+%* descj = find(R(j,:));
+
+order = topological_sort(dag);
+
+% SML: originally Kevin used the %* line but this was extracting the
+% wrong things to sort
+%* descj_topnum = order(descj);
+[junk, perm] = sort(order); %SML:node i is perm(i)-TH in order
+descj_topnum = perm(descj); %SML:descj(i) is descj_topnum(i)-th in order
+
+% SML: now re-sort descj by rank in descj_topnum
+[junk, perm] = sort(descj_topnum);
+descj = descj(perm);
+
+% Update j and all its descendants
+A = update_row(A, j, dag);
+for k = descj(:)'
+ A = update_row(A, k, dag);
+end
+
+%%%%%%%%%%%
+
+function A = old_do_removal(A, op, i, j, dag)
+
+% find all the descendants of j, and put them in topological order
+% SML: originally Kevin had the next line commented and the %* lines
+% being used but I think this is equivalent and much less expensive
+% I assume he put it there for debugging and never changed it back...?
+descj = find(A(:,j));
+%* R = reachability_graph(dag);
+%* descj = find(R(j,:));
+
+order = topological_sort(dag);
+descj_topnum = order(descj);
+[junk, perm] = sort(descj_topnum);
+descj = descj(perm);
+% Update j and all its descendants
+A = update_row(A, j, dag);
+for k = descj(:)'
+ A = update_row(A, k, dag);
+end
+
+%%%%%%%%%
+
+function A = update_row(A, j, dag)
+
+% We compute row j of A
+A(j, :) = 0;
+ps = parents(dag, j);
+if ~isempty(ps)
+ A(j, ps) = 1;
+end
+for k=ps(:)'
+ anck = find(A(k,:));
+ if ~isempty(anck)
+ A(j, anck) = 1;
+ end
+end
+
+%%%%%%%%
+
+function A = init_ancestor_matrix(dag)
+
+order = topological_sort(dag);
+A = zeros(length(dag));
+for j=order(:)'
+ A = update_row(A, j, dag);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_pdag_ic_star.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_pdag_ic_star.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,155 @@
+function [pdag, G] = learn_struct_pdag_ic_star(cond_indep, n, k, varargin)
+% LEARN_STRUCT_PDAG_IC_STAR Learn a partially oriented DAG (pattern) with latent
+% variables using the IC* algorithm
+% P = learn_struct_pdag_ic_star(cond_indep, n, k, ...)
+%
+% n is the number of nodes.
+% k is an optional upper bound on the fan-in (default: n)
+% cond_indep is a boolean function that will be called as follows:
+% feval(cond_indep, x, y, S, ...)
+% where x and y are nodes, and S is a set of nodes (positive integers),
+% and ... are any optional parameters passed to this function.
+%
+% The output P is an adjacency matrix, in which
+% P(i,j) = -1 if there is either a latent variable L such that i <-L-> j
+% OR there is a directed edge from i->j.
+% P(i,j) = -2 if there is a marked directed i-*>j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i--j
+% P(i,j) = P(j,i) = 2 if there is a latent variable L such that i<-L->j.
+%
+% The IC* algorithm learns a latent structure associated with a set of observed
+% variables.
+% The latent structure revealed is the projection in which every latent variable is
+% 1) a root node
+% 2) linked to exactly two observed variables.
+% Latent variables in the projection are represented using a bidirectional graph,
+% and thus remain implicit.
+%
+% See Pearl, "Causality: Models, Reasoning, and Inference", 2000, p52 for more details.
+% Written by Tamar Kushnir, 2000
+
+sep = cell(n,n);
+ord = 0;
+done = 0;
+G = ones(n,n);
+G = setdiag(G,0);
+while ~done
+ done = 1;
+ [X,Y] = find(G);
+ for i=1:length(X)
+ x = X(i); y = Y(i);
+ nbrs = mysetdiff(myunion(neighbors(G, x), neighbors(G,y)), [x y]);
+ if length(nbrs) >= ord & G(x,y) ~= 0
+ done = 0;
+ SS = subsets(nbrs, ord, ord); % all subsets of size ord
+ for si=1:length(SS)
+ S = SS{si};
+ if feval(cond_indep, x, y, S, varargin{:})
+ G(x,y) = 0;
+ G(y,x) = 0;
+ sep{x,y} = myunion(sep{x,y}, S);
+ sep{y,x} = myunion(sep{y,x}, S);
+ break; % no need to check any more subsets
+ end
+ end
+ end
+ end
+ ord = ord + 1;
+end
+
+% Create the minimal pattern,
+% i.e., the only directed edges are V structures.
+pdag = G;
+[X, Y] = find(G);
+% We want to generate all unique triples x,y,z
+% where y is a common neighbor to x and z
+for i=1:length(X)
+ x = X(i);
+ y = Y(i);
+ Z = find(G(y,:));
+ Z = mysetdiff(Z, x);
+ for z=Z(:)'
+ if G(x,z)==0 & ~ismember(y, sep{x,z}) & ~ismember(y, sep{z,x})
+ pdag(x,y) = -1; pdag(y,x) = 0;
+ pdag(z,y) = -1; pdag(y,z) = 0;
+ end
+ end
+end
+
+% Convert the minimal pattern to a complete one using the following rules:
+% Rule 1:
+% if a and b are non-adjacent nodes with a common neighbor c,
+% if a->c and not b->c then c-*>b (marked arrow).
+% Rule 2:
+% if a and b are adjacent and there is a directed path (marked links) from a to b
+% then a->b (add arrowhead).
+%Pearl (2000)
+
+arrowin = [-1 -2 2];
+old_pdag = zeros(n);
+iter = 0;
+while ~isequal(pdag, old_pdag)
+ iter = iter + 1;
+ old_pdag = pdag;
+ % rule 1
+ [X, Y] = find(pdag);
+ for i=1:length(X)
+ x = X(i);
+ y = Y(i);
+ Z = find(pdag(y,:));
+ Z = mysetdiff(Z, x);
+ for z=Z(:)'
+ if G(x,z)==0 & ismember(pdag(x,y),arrowin) & ~ismember(pdag(z,y),arrowin)
+ pdag(y,z) = -2; pdag(z,y) = 0;
+ end
+ end
+ end
+ % rule 2
+ [X, Y] = find(G);
+ %check all adjacent nodes because if pdag(x,y) = -1
+ %and pdag(y,x) = 0 there could still be an bidirected edge between x & y.
+ for i=1:length(X)
+ x = X(i);
+ y = Y(i);
+ if ~ismember(pdag(x,y), arrowin) %x->y doesn't exist yet
+ %find marked path from x to y
+ add_arrow = marked_path(x,y,pdag);
+ if add_arrow
+ if pdag(y,x)==-1 %bidirected edge
+ pdag(x,y) = 2; pdag(y,x) = 2;
+ else
+ pdag(x,y) = -1;pdag(y,x) = 0;
+ end
+ end
+ end
+ end
+end
+
+
+%%%%%%%%%%%%%
+
+function t = marked_path(x,y,L)
+% MARKED_PATH is a boolean function which returns 1 if a marked path
+% between nodes x and y exists in the partially directed latent structure L.
+%
+% t = marked_path(x,y,L)
+%
+% x and y are the starting and ending nodes in the path, respectively.
+% L is a latent structure (partially directed graph with possible latent variables).
+%
+% Rule 2 of IC* algorithm (see Pearl, 2000)
+
+t=0;
+
+%find set of marked links from x
+marked = find(L(x,:)==-2);
+if ismember(y,marked)
+ t=1; %marked path found
+else
+ for m=marked(:)'
+ t = marked_path(m,y,L);
+ if t==1
+ break; %stop when marked path found
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_pdag_pc.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_pdag_pc.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,129 @@
+function [pdag, G] = learn_struct_pdag_pc(cond_indep, n, k, varargin)
+% LEARN_STRUCT_PDAG_PC Learn a partially oriented DAG (pattern) using the PC algorithm
+% P = learn_struct_pdag_pc(cond_indep, n, k, ...)
+%
+% n is the number of nodes.
+% k is an optional upper bound on the fan-in (default: n)
+% cond_indep is a boolean function that will be called as follows:
+% feval(cond_indep, x, y, S, ...)
+% where x and y are nodes, and S is a set of nodes (positive integers),
+% and ... are any optional parameters passed to this function.
+%
+% The output P is an adjacency matrix, in which
+% P(i,j) = -1 if there is an i->j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i <-> j
+%
+% The PC algorithm does structure learning assuming all variables are observed.
+% See Spirtes, Glymour and Scheines, "Causation, Prediction and Search", 1993, p117.
+% This algorithm may take O(n^k) time if there are n variables and k is the max fan-in,
+% but this is quicker than the Verma-Pearl IC algorithm, which is always O(n^n).
+
+
+sep = cell(n,n);
+ord = 0;
+done = 0;
+G = ones(n,n);
+G=setdiag(G,0);
+while ~done
+ done = 1;
+ [X,Y] = find(G);
+ for i=1:length(X)
+ x = X(i); y = Y(i);
+ %nbrs = mysetdiff(myunion(neighbors(G, x), neighbors(G,y)), [x y]);
+ nbrs = mysetdiff(neighbors(G, y), x); % bug fix by Raanan Yehezkel 6/27/04
+ if length(nbrs) >= ord & G(x,y) ~= 0
+ done = 0;
+ %SS = subsets(nbrs, ord, ord); % all subsets of size ord
+ SS = subsets1(nbrs, ord);
+ for si=1:length(SS)
+ S = SS{si};
+ if feval(cond_indep, x, y, S, varargin{:})
+ %if isempty(S)
+ % fprintf('%d indep of %d ', x, y);
+ %else
+ % fprintf('%d indep of %d given ', x, y); fprintf('%d ', S);
+ %end
+ %fprintf('\n');
+
+ % diagnostic
+ %[CI, r] = cond_indep_fisher_z(x, y, S, varargin{:});
+ %fprintf(': r = %6.4f\n', r);
+
+ G(x,y) = 0;
+ G(y,x) = 0;
+ sep{x,y} = myunion(sep{x,y}, S);
+ sep{y,x} = myunion(sep{y,x}, S);
+ break; % no need to check any more subsets
+ end
+ end
+ end
+ end
+ ord = ord + 1;
+end
+
+
+% Create the minimal pattern,
+% i.e., the only directed edges are V structures.
+pdag = G;
+[X, Y] = find(G);
+% We want to generate all unique triples x,y,z
+% This code generates x,y,z and z,y,x.
+for i=1:length(X)
+ x = X(i);
+ y = Y(i);
+ Z = find(G(y,:));
+ Z = mysetdiff(Z, x);
+ for z=Z(:)'
+ if G(x,z)==0 & ~ismember(y, sep{x,z}) & ~ismember(y, sep{z,x})
+ %fprintf('%d -> %d <- %d\n', x, y, z);
+ pdag(x,y) = -1; pdag(y,x) = 0;
+ pdag(z,y) = -1; pdag(y,z) = 0;
+ end
+ end
+end
+
+% Convert the minimal pattern to a complete one,
+% i.e., every directed edge in P is compelled
+% (must be directed in all Markov equivalent models),
+% and every undirected edge in P is reversible.
+% We use the rules of Pearl (2000) p51 (derived in Meek (1995))
+
+old_pdag = zeros(n);
+iter = 0;
+while ~isequal(pdag, old_pdag)
+ iter = iter + 1;
+ old_pdag = pdag;
+ % rule 1
+ [A,B] = find(pdag==-1); % a -> b
+ for i=1:length(A)
+ a = A(i); b = B(i);
+ C = find(pdag(b,:)==1 & G(a,:)==0); % all nodes adj to b but not a
+ if ~isempty(C)
+ pdag(b,C) = -1; pdag(C,b) = 0;
+ %fprintf('rule 1: a=%d->b=%d and b=%d-c=%d implies %d->%d\n', a, b, b, C, b, C);
+ end
+ end
+ % rule 2
+ [A,B] = find(pdag==1); % unoriented a-b edge
+ for i=1:length(A)
+ a = A(i); b = B(i);
+ if any( (pdag(a,:)==-1) & (pdag(:,b)==-1)' );
+ pdag(a,b) = -1; pdag(b,a) = 0;
+ %fprintf('rule 2: %d -> %d\n', a, b);
+ end
+ end
+ % rule 3
+ [A,B] = find(pdag==1); % a-b
+ for i=1:length(A)
+ a = A(i); b = B(i);
+ C = find( (pdag(a,:)==1) & (pdag(:,b)==-1)' );
+% C contains nodes c s.t. a-c->b
+ G2 = setdiag(G(C, C), 1);
+ if any(G2(:)==0) % there are 2 different non adjacent elements of C
+ pdag(a,b) = -1; pdag(b,a) = 0;
+ %fprintf('rule 3: %d -> %d\n', a, b);
+ end
+ end
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_pdag_pc_constrain.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/learn_struct_pdag_pc_constrain.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,177 @@
+function [pdag, G] = dn_learn_struct_pdag_pc_constrain(adj, cond_indep, n, k, varargin)
+% LEARN_STRUCT_PDAG_PC_CONSTRAIN Learn a partially oriented DAG (pattern) using the PC algorithm
+% Pdag = learn_struct_pdag_pc_constrain(adj, cond_indep, n, k, ...)
+%
+% adj = adjacency matrix learned from dependency network: adj(i,j) = 1 => i--j; 0 => no i--j edge
+% n is the number of nodes.
+% k is an optional upper bound on the fan-in (default: n)
+% cond_indep is a boolean function that will be called as follows:
+% feval(cond_indep, x, y, S, ...)
+% where x and y are nodes, and S is a set of nodes (positive integers),
+% and ... are any optional parameters passed to this function.
+%
+%Output
+% pdag Partially directed graph
+% G Resulting adjacency graph prior to setting direction arrows
+%
+% The output P is an adjacency matrix, in which
+% P(i,j) = -1 if there is an i->j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i <-> j
+%
+% The PC algorithm does structure learning assuming all variables are observed.
+% See Spirtes, Glymour and Scheines, "Causation, Prediction and Search", 1993, p117.
+% This algorithm may take O(n^k) time if there are n variables and k is the max fan-in,
+% but this is quicker than the Verma-Pearl IC algorithm, which is always O(n^n).
+%
+%% Example
+%% Given data in a comma separated, filename starting with the variable labels, then the data in rows.
+%% filename test.txt consists of:
+%%
+%% Earthquake,Burglar,Radio,Alarm,Call
+%% 1,2,2,2,1
+%% 1,1,2,1,2
+%% . . .
+%[CovMatrix, obs, varfields] = CovMat('test.txt',5);
+%
+%dn = zeros(5,5);
+%dn(1,2) = 1; % This was the known Markov blanket of the system that generated test.txt
+%dn(2,1) = 1;
+%dn(2,4) = 1;
+%dn(4,2) = 1;
+%dn(1,3) = 1;
+%dn(3,1) = 1;
+%dn(1,4) = 1;
+%dn(4,1) = 1;
+%dn(4,5) = 1;
+%dn(5,4) = 1;
+%dn(3,5) = 1; %loop r->c
+%dn(5,3) = 1; %loop c-r
+%dn(3,4) = 1;
+%dn(4,3) = 1;
+%
+%max_fan_in = 4;
+%alpha = 0.05;
+%
+%[pdag G] = learn_struct_pdag_pc_constrain(dn,'cond_indep_fisher_z', 5, max_fan_in, CovMatrix, obs, alpha);
+%%
+%%
+%% Gary Bradski, 7/2002 Modified this to take an adjacency matrix from a dependency network.
+
+
+sep = cell(n,n);
+ord = 0;
+done = 0;
+G = ones(n,n);
+G=setdiag(G,0);
+
+while ~done
+ done = 1;
+ [X,Y] = find(G);
+ for i=1:length(X)
+ x = X(i); y = Y(i);
+% nbrs = mysetdiff(myunion(neighbors(G, x), neighbors(G,y)), [x y]);%parents, children, but not self
+ nbrs = mysetdiff(myunion(neighbors(adj, x), neighbors(adj,y)), [x y]);%parents, children, but not self
+
+ if length(nbrs) >= ord & G(x,y) ~= 0
+ done = 0;
+ SS = subsets(nbrs, ord, ord); % all subsets of size ord
+ for si=1:length(SS)
+ S = SS{si};
+ %if (feval(dsep,x,y,S,adj)) | (feval(cond_indep, x, y, S, varargin{:}))
+ if feval(cond_indep, x, y, S, varargin{:})
+ %if isempty(S)
+ % fprintf('%d indep of %d ', x, y);
+ %else
+ % fprintf('%d indep of %d given ', x, y); fprintf('%d ', S);
+ %end
+ %fprintf('\n');
+
+ % diagnostic
+ %[CI, r] = cond_indep_fisher_z(x, y, S, varargin{:});
+ %fprintf(': r = %6.4f\n', r);
+
+ G(x,y) = 0;
+ G(y,x) = 0;
+ adj(x,y) = 0; %make sure found cond. independencies are marked out
+ adj(y,x) = 0;
+ sep{x,y} = myunion(sep{x,y}, S);
+ sep{y,x} = myunion(sep{y,x}, S);
+ break; % no need to check any more subsets
+ end
+ end
+ end
+ end
+ ord = ord + 1;
+end
+
+
+
+
+% Create the minimal pattern,
+% i.e., the only directed edges are V structures.
+
+pdag = G;
+[X, Y] = find(G);
+% We want to generate all unique triples x,y,z
+% This code generates x,y,z and z,y,x.
+for i=1:length(X)
+ x = X(i);
+ y = Y(i);
+ Z = find(G(y,:));
+ Z = mysetdiff(Z, x);
+ for z=Z(:)'
+ if G(x,z)==0 & ~ismember(y, sep{x,z}) & ~ismember(y, sep{z,x})
+ %fprintf('%d -> %d <- %d\n', x, y, z);
+ pdag(x,y) = -1; pdag(y,x) = 0;
+ pdag(z,y) = -1; pdag(y,z) = 0;
+ end
+ end
+end
+
+% Convert the minimal pattern to a complete one,
+% i.e., every directed edge in P is compelled
+% (must be directed in all Markov equivalent models),
+% and every undirected edge in P is reversible.
+% We use the rules of Pearl (2000) p51 (derived in Meek (1995))
+
+old_pdag = zeros(n);
+iter = 0;
+while ~isequal(pdag, old_pdag)
+ iter = iter + 1;
+ old_pdag = pdag;
+ % rule 1
+ [A,B] = find(pdag==-1); % a -> b
+ for i=1:length(A)
+ a = A(i); b = B(i);
+ C = find(pdag(b,:)==1 & G(a,:)==0); % all nodes adj to b but not a
+ if ~isempty(C)
+ pdag(b,C) = -1; pdag(C,b) = 0;
+ %fprintf('rule 1: a=%d->b=%d and b=%d-c=%d implies %d->%d\n', a, b, b, C, b, C);
+ end
+ end
+ % rule 2
+ [A,B] = find(pdag==1); % unoriented a-b edge
+ for i=1:length(A)
+ a = A(i); b = B(i);
+ if any( (pdag(a,:)==-1) & (pdag(:,b)==-1)' );
+ pdag(a,b) = -1; pdag(b,a) = 0;
+ %fprintf('rule 2: %d -> %d\n', a, b);
+ end
+ end
+ % rule 3
+ [A,B] = find(pdag==1); % a-b
+ for i=1:length(A)
+ a = A(i); b = B(i);
+ C = find( (G(a,:)==1) & (pdag(:,b)==-1)' );
+ % C contains nodes c s.t. a adj c (in G) and c->b; NOTE(review): the sibling learn_struct_pdag_pc uses pdag(a,:)==1 (a-c undirected) here -- confirm which is intended
+ G2 = setdiag(G(C, C), 1);
+ if any(G2(:)==0) % there are 2 different non adjacent elements of C
+ pdag(a,b) = -1; pdag(b,a) = 0;
+ %fprintf('rule 3: %d -> %d\n', a, b);
+ end
+ end
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/mcmc_sample_to_hist.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/mcmc_sample_to_hist.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function mcmc_post = mcmc_sample_to_hist(sampled_graphs, dags)
+% MCMC_SAMPLE_TO_HIST Convert a set of sampled dags into a histogram over dags
+% hist = mcmc_sample_to_hist(sampled_graphs, dags)
+%
+% sampled_graphs{m} is the m'th sampled dag
+% dags{i} is the i'th dag in the hypothesis space
+% hist(i) = Pr(model i | data)
+
+ndags = length(dags);
+nsamples = length(sampled_graphs);
+nnodes = length(dags{1});
+% sampled_bitvs(m, :) is the m'th sampled graph represented as a vector of n^2 bits, computed
+% by stacking the columns of the adjacency matrix vertically.
+sampled_bitvs = zeros(nsamples, nnodes*nnodes);
+for m=1:nsamples
+ sampled_bitvs(m, :) = sampled_graphs{m}(:)';
+end
+
+[ugraphs, I, J] = unique(sampled_bitvs, 'rows'); % each row of ugraphs is a unique bit vector; J(m) is the row of ugraphs for sample m
+sampled_indices = subv2ind(2*ones(1,nnodes*nnodes), ugraphs+1); % encode each unique graph's bit vector as a single integer
+counts = hist(J, 1:size(ugraphs,1)); % counts(i) = number of times graphs(i,:) occurs in the sample
+
+mcmc_post = zeros(1, ndags);
+for i=1:ndags
+ bitv = dags{i}(:)';
+ % Find the sample row that corresponds to this graph by converting the graph to a bitvector and
+ % then to an integer code.
+ ndx = subv2ind(2*ones(1,nnodes*nnodes), bitv+1);
+ locn = find(ndx == sampled_indices);
+ if ~isempty(locn)
+ mcmc_post(i) = counts(locn);
+ end
+end
+mcmc_post = normalise(mcmc_post);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/mk_schedule.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/mk_schedule.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function schedule = mk_schedule(init_temp, final_temp, anneal_rate)
+% MK_SCHEDULE Build a geometric (simulated-annealing) temperature schedule.
+% schedule = mk_schedule(init_temp, final_temp, anneal_rate)
+%
+% schedule(1) = init_temp, and each subsequent temperature is the previous
+% one multiplied by anneal_rate; generation stops once the temperature has
+% dropped to final_temp or below (the first such value is included).
+%
+% Defaults (used when an argument is omitted): 10, 1e-2, 0.8.
+
+% BUG FIX: the original version unconditionally overwrote all three input
+% arguments with hard-coded constants, so callers could not change them.
+if nargin < 1, init_temp = 10; end
+if nargin < 2, final_temp = 1e-2; end
+if nargin < 3, anneal_rate = 0.8; end
+
+temp = init_temp;
+schedule = temp;
+while temp > final_temp
+  temp = temp * anneal_rate;
+  schedule = [schedule temp];
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/mk_tetrad_data_file.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/mk_tetrad_data_file.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function mk_tetrad_data_file(filename, samples, sig)
+% MK_TETRAD_DATA_FILE Make a file containing raw discrete data for input to TETRAD
+% mk_tetrad_data_file(filename, samples, sig)  (sig is currently unused; see commented-out Knowledge section below)
+%
+% samples(i,j) is the value for case i, variable j (values are 1-based here, written out 0-based)
+% The resulting file can be used for the 'build' part of Tetrad.
+% For details on tetrad, see hss.cmu.edu/html/departments/philosophy/TETRAD/tetrad.html
+
+[nsamples N] = size(samples);
+
+fid = fopen(filename, 'w');
+fprintf(fid, '/Raw\n');
+fprintf(fid, '%d\n', nsamples);
+for i=1:N
+ fprintf(fid, 'x%d ', i);
+end
+fprintf(fid, '\n');
+for i=1:nsamples
+ fprintf(fid, '%d ', samples(i,:)-1); % shift to 0-based because Tetrad counts values from 0
+ fprintf(fid, '\n');
+end
+%fprintf(fid, '/Knowledge\n');
+%fprintf(fid, 'Significance %4.2f\n', sig);
+fclose(fid);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/score_dags.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/score_dags.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,70 @@
+function score = score_dags(data, ns, dags, varargin)
+% SCORE_DAGS Compute the score of one or more DAGs
+% score = score_dags(data, ns, dags, varargin)
+%
+% data{i,m} = value of node i in case m (can be a cell array).
+% node_sizes(i) is the size (number of possible values) of node i.
+% dags{g} is the g'th dag
+% score(g) is the score of the g'th dag
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% scoring_fn - 'bayesian' or 'bic' [ 'bayesian' ]
+% Currently, only networks with all tabular nodes support Bayesian scoring.
+% type - type{i} is the type of CPD to use for node i, where the type is a string
+% of the form 'tabular', 'noisy_or', 'gaussian', etc. [ all cells contain 'tabular' ]
+% params - params{i} contains optional arguments passed to the CPD constructor for node i,
+% or [] if none. [ all cells contain {'prior', 1}, meaning use uniform Dirichlet priors ]
+% discrete - the list of discrete nodes [ 1:N ]
+% clamped - clamped(i,m) = 1 if node i is clamped in case m [ zeros(N, ncases) ]
+%
+% e.g., score = score_dags(data, ns, mk_all_dags(n), 'scoring_fn', 'bic', 'params', []);
+%
+% If the DAGs have a lot of families in common, we can cache the sufficient statistics,
+% making this potentially more efficient than scoring the DAGs one at a time.
+% (Caching is not currently implemented, however.)
+
+[n ncases] = size(data);
+
+% set default params
+type = cell(1,n);
+params = cell(1,n);
+for i=1:n
+  type{i} = 'tabular';
+  params{i} = { 'prior_type', 'dirichlet', 'dirichlet_weight', 1 };
+end
+scoring_fn = 'bayesian';
+discrete = 1:n;
+
+u = [1:ncases]'; % DWH
+isclamped = 0; %DWH
+clamped = zeros(n, ncases);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+  case 'scoring_fn', scoring_fn = args{i+1};
+  case 'type', type = args{i+1};
+  case 'discrete', discrete = args{i+1};
+  % BUG FIX: this assignment previously ended with a comma instead of a
+  % semicolon, which echoed the whole clamped matrix to the console.
+  case 'clamped', clamped = args{i+1}; isclamped = 1; %DWH
+  case 'params', if isempty(args{i+1}), params = cell(1,n); else params = args{i+1}; end
+ end
+end
+
+NG = length(dags);
+score = zeros(1, NG);
+for g=1:NG
+ dag = dags{g};
+ for j=1:n
+  if isclamped %DWH
+   % only score node j on the cases where it was not clamped
+   u = find(clamped(j,:)==0);
+  end
+  ps = parents(dag, j);
+  score(g) = score(g) + score_family(j, ps, type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/score_dags_old.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/score_dags_old.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function score = score_dags(data, ns, dags, varargin)
+% SCORE_DAGS Compute the score of one or more DAGs (older copy, kept as score_dags_old.m)
+% score = score_dags(data, ns, dags, varargin)
+%
+% data{i,m} = value of node i in case m (can be a cell array).
+% node_sizes(i) is the size (number of possible values) of node i.
+% dags{g} is the g'th dag
+% score(g) is the score of the g'th dag
+%
+% The following optional arguments can be specified in the form of name/value pairs:
+% [default value in brackets]
+%
+% scoring_fn - 'bayesian' or 'bic' [ 'bayesian' ]
+% Currently, only networks with all tabular nodes support Bayesian scoring.
+% type - type{i} is the type of CPD to use for node i, where the type is a string
+% of the form 'tabular', 'noisy_or', 'gaussian', etc. [ all cells contain 'tabular' ]
+% params - params{i} contains optional arguments passed to the CPD constructor for node i,
+% or [] if none. [ all cells contain {'prior', 1}, meaning use uniform Dirichlet priors ]
+% discrete - the list of discrete nodes [ 1:N ]
+% clamped - clamped(i,m) = 1 if node i is clamped in case m [ zeros(N, ncases) ]
+%
+% e.g., score = score_dags(data, ns, mk_all_dags(n), 'scoring_fn', 'bic', 'params', []);
+%
+% If the DAGs have a lot of families in common, we can cache the sufficient statistics,
+% making this potentially more efficient than scoring the DAGs one at a time.
+% (Caching is not currently implemented, however.)
+
+[n ncases] = size(data);
+
+% set default params
+type = cell(1,n);
+params = cell(1,n);
+for i=1:n
+ type{i} = 'tabular';
+ params{i} = { 'prior_type', 'dirichlet', 'dirichlet_weight', 1 };
+end
+scoring_fn = 'bayesian';
+discrete = 1:n;
+clamped = zeros(n, ncases);
+
+args = varargin;
+nargs = length(args);
+for i=1:2:nargs
+ switch args{i},
+ case 'scoring_fn', scoring_fn = args{i+1};
+ case 'type', type = args{i+1};
+ case 'discrete', discrete = args{i+1};
+ case 'clamped', clamped = args{i+1};
+ case 'params', if isempty(args{i+1}), params = cell(1,n); else params = args{i+1}; end
+ end
+end
+
+NG = length(dags);
+score = zeros(1, NG);
+for g=1:NG
+ dag = dags{g};
+ for j=1:n
+ u = find(clamped(j,:)==0);
+ ps = parents(dag, j);
+ score(g) = score(g) + score_family(j, ps, type{j}, scoring_fn, ns, discrete, data(:,u), params{j});
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/learning/score_family.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/learning/score_family.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function score = score_family(j, ps, node_type, scoring_fn, ns, discrete, data, args)
+% SCORE_FAMILY Compute the score of a node and its parents given completely observed data
+% score = score_family(j, ps, node_type, scoring_fn, ns, discrete, data, args)
+%
+% data(i,m) is the value of node i in case m (can be a cell array)
+% args is a cell array containing optional arguments passed to the constructor,
+% or is [] if none
+%
+% We create a whole Bayes net which only connects parents to node,
+% where node has a CPD of the specified type (with default parameters).
+% We then evaluate its score ('bic' or 'bayesian')
+
+% We should use a cache to avoid unnecessary computation.
+% In particular, log_marginal_prob_node for tabular CPDs calls gammaln
+% and compute_counts, both of which are slow.
+
+[n ncases] = size(data);
+dag = zeros(n,n);
+% SML added to sort ps b/c mk_bnet, learn_params use sorted ps to make
+% CPTs
+% Kevin had: if ~isempty(ps), dag(ps, j) = 1; end
+if ~isempty(ps), dag(ps, j) = 1;, ps = sort(ps);, end
+
+bnet = mk_bnet(dag, ns, 'discrete', discrete);
+%bnet.CPD{j} = xxx_CPD(bnet, j);
+%eval(sprintf('bnet.CPD{j} = %s_CPD(bnet, j);', node_type));
+fname = sprintf('%s_CPD', node_type);
+%fprintf('score CPD %d\n', j);
+if isempty(args)
+ bnet.CPD{j} = feval(fname, bnet, j);
+else
+ bnet.CPD{j} = feval(fname, bnet, j, args{:});
+end
+switch scoring_fn
+ case 'bic',
+ fam = [ps j];
+ %score = BIC_score_CPD(bnet.CPD{j}, fam, data, ns, bnet.cnodes);
+ %bnet.CPD{j} = learn_params(bnet.CPD{j}, fam, data, ns, bnet.cnodes);
+
+ % SML 03/16/04 had to special case gaussian b/c generic_CPD/learn_params
+ % no longer supported because of simple interface to learn_params
+ % introduced by KPM for tabular nodes below:
+ % KPM 9 June 04 - tabular nodes have changed back!
+ if 1 % always take this branch; was (isempty(find(j==discrete))) to special-case gaussian nodes
+ bnet.CPD{j} = learn_params(bnet.CPD{j}, fam, data, ns, bnet.cnodes);
+ else
+ bnet.CPD{j} = learn_params(bnet.CPD{j}, data(fam, :));
+ end
+ L = log_prob_node(bnet.CPD{j}, data(j,:), data(ps,:));
+ S = struct(bnet.CPD{j}); % violate object privacy
+ score = L - 0.5*S.nparams*log(ncases);
+ case 'bayesian',
+ %score = bayesian_score_CPD(bnet.CPD{j}, data(fam, :));
+ score = log_marg_prob_node(bnet.CPD{j}, data(j,:), data(ps,:));
+ otherwise,
+ error(['unrecognized scoring fn ' scoring_fn]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/license.gpl.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/license.gpl.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,450 @@
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License version 2 as published by the Free Software Foundation.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+GNU Library General Public License
+
+----------------------------------------------------------------------------
+
+Table of Contents
+
+ * GNU LIBRARY GENERAL PUBLIC LICENSE
+ o Preamble
+ o TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+----------------------------------------------------------------------------
+
+GNU LIBRARY GENERAL PUBLIC LICENSE
+
+Version 2, June 1991
+
+Copyright (C) 1991 Free Software Foundation, Inc.
+675 Mass Ave, Cambridge, MA 02139, USA
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public Licenses are
+intended to guarantee your freedom to share and change free software--to
+make sure the software is free for all its users.
+
+This license, the Library General Public License, applies to some specially
+designated Free Software Foundation software, and to any other libraries
+whose authors decide to use it. You can use it for your libraries, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the library, or if you modify it.
+
+For example, if you distribute copies of the library, whether gratis or for
+a fee, you must give the recipients all the rights that we gave you. You
+must make sure that they, too, receive or can get the source code. If you
+link a program with the library, you must provide complete object files to
+the recipients so that they can relink them with the library, after making
+changes to the library and recompiling it. And you must show them these
+terms so they know their rights.
+
+Our method of protecting your rights has two steps: (1) copyright the
+library, and (2) offer you this license which gives you legal permission to
+copy, distribute and/or modify the library.
+
+Also, for each distributor's protection, we want to make certain that
+everyone understands that there is no warranty for this free library. If the
+library is modified by someone else and passed on, we want its recipients to
+know that what they have is not the original version, so that any problems
+introduced by others will not reflect on the original authors' reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that companies distributing free software will
+individually obtain patent licenses, thus in effect transforming the program
+into proprietary software. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+Most GNU software, including some libraries, is covered by the ordinary GNU
+General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary one;
+be sure to read it in full, and don't assume that anything in it is the same
+as in the ordinary license.
+
+The reason we have a separate public license for some libraries is that they
+blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in a
+textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+Because of this blurred distinction, using the ordinary General Public
+License for libraries did not effectively promote software sharing, because
+most developers did not use the libraries. We concluded that weaker
+conditions might promote sharing better.
+
+However, unrestricted linking of non-free programs would deprive the users
+of those programs of all benefit from the free status of the libraries
+themselves. This Library General Public License is intended to permit
+developers of non-free programs to use free libraries, while preserving your
+freedom as a user of such programs to change the free libraries that are
+incorporated in them. (We have not seen how to achieve this as regards
+changes in header files, but we have achieved it as regards changes in the
+actual functions of the Library.) The hope is that this will lead to faster
+development of free libraries.
+
+The precise terms and conditions for copying, distribution and modification
+follow. Pay close attention to the difference between a "work based on the
+library" and a "work that uses the library". The former contains code
+derived from the library, while the latter only works together with the
+library.
+
+Note that it is possible for a library to be covered by the ordinary General
+Public License rather than by this special one.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License Agreement applies to any software library which contains a
+notice placed by the copyright holder or other authorized party saying it
+may be distributed under the terms of this Library General Public License
+(also called "this License"). Each licensee is addressed as "you".
+
+A "library" means a collection of software functions and/or data prepared so
+as to be conveniently linked with application programs (which use some of
+those functions and data) to form executables.
+
+The "Library", below, refers to any such software library or work which has
+been distributed under these terms. A "work based on the Library" means
+either the Library or any derivative work under copyright law: that is to
+say, a work containing the Library or a portion of it, either verbatim or
+with modifications and/or translated straightforwardly into another
+language. (Hereinafter, translation is included without limitation in the
+term "modification".)
+
+"Source code" for a work means the preferred form of the work for making
+modifications to it. For a library, complete source code means all the
+source code for all modules it contains, plus any associated interface
+definition files, plus the scripts used to control compilation and
+installation of the library.
+
+Activities other than copying, distribution and modification are not covered
+by this License; they are outside its scope. The act of running a program
+using the Library is not restricted, and output from such a program is
+covered only if its contents constitute a work based on the Library
+(independent of the use of the Library in a tool for writing it). Whether
+that is true depends on what the Library does and what the program that uses
+the Library does.
+
+1. You may copy and distribute verbatim copies of the Library's complete
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the notices
+that refer to this License and to the absence of any warranty; and
+distribute a copy of this License along with the Library.
+
+You may charge a fee for the physical act of transferring a copy, and you
+may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Library or any portion of it,
+thus forming a work based on the Library, and copy and distribute such
+modifications or work under the terms of Section 1 above, provided that you
+also meet all of these conditions:
+
+ o a) The modified work must itself be a software library.
+
+ o b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ o c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ o d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If identifiable
+sections of that work are not derived from the Library, and can be
+reasonably considered independent and separate works in themselves, then
+this License, and its terms, do not apply to those sections when you
+distribute them as separate works. But when you distribute the same sections
+as part of a whole which is a work based on the Library, the distribution of
+the whole must be on the terms of this License, whose permissions for other
+licensees extend to the entire whole, and thus to each and every part
+regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest your
+rights to work written entirely by you; rather, the intent is to exercise
+the right to control the distribution of derivative or collective works
+based on the Library.
+
+In addition, mere aggregation of another work not based on the Library with
+the Library (or with a work based on the Library) on a volume of a storage
+or distribution medium does not bring the other work under the scope of this
+License.
+
+3. You may opt to apply the terms of the ordinary GNU General Public License
+instead of this License to a given copy of the Library. To do this, you must
+alter all the notices that refer to this License, so that they refer to the
+ordinary GNU General Public License, version 2, instead of to this License.
+(If a newer version than version 2 of the ordinary GNU General Public
+License has appeared, then you can specify that version instead if you
+wish.) Do not make any other change in these notices.
+
+Once this change is made in a given copy, it is irreversible for that copy,
+so the ordinary GNU General Public License applies to all subsequent copies
+and derivative works made from that copy.
+
+This option is useful when you wish to copy part of the code of the Library
+into a program that is not a library.
+
+4. You may copy and distribute the Library (or a portion or derivative of
+it, under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you accompany it with the complete
+corresponding machine-readable source code, which must be distributed under
+the terms of Sections 1 and 2 above on a medium customarily used for
+software interchange.
+
+If distribution of object code is made by offering access to copy from a
+designated place, then offering equivalent access to copy the source code
+from the same place satisfies the requirement to distribute the source code,
+even though third parties are not compelled to copy the source along with
+the object code.
+
+5. A program that contains no derivative of any portion of the Library, but
+is designed to work with the Library by being compiled or linked with it, is
+called a "work that uses the Library". Such a work, in isolation, is not a
+derivative work of the Library, and therefore falls outside the scope of
+this License.
+
+However, linking a "work that uses the Library" with the Library creates an
+executable that is a derivative of the Library (because it contains portions
+of the Library), rather than a "work that uses the library". The executable
+is therefore covered by this License. Section 6 states terms for
+distribution of such executables.
+
+When a "work that uses the Library" uses material from a header file that is
+part of the Library, the object code for the work may be a derivative work
+of the Library even though the source code is not. Whether this is true is
+especially significant if the work can be linked without the Library, or if
+the work is itself a library. The threshold for this to be true is not
+precisely defined by law.
+
+If such an object file uses only numerical parameters, data structure
+layouts and accessors, and small macros and small inline functions (ten
+lines or less in length), then the use of the object file is unrestricted,
+regardless of whether it is legally a derivative work. (Executables
+containing this object code plus portions of the Library will still fall
+under Section 6.)
+
+Otherwise, if the work is a derivative of the Library, you may distribute
+the object code for the work under the terms of Section 6. Any executables
+containing that work also fall under Section 6, whether or not they are
+linked directly with the Library itself.
+
+6. As an exception to the Sections above, you may also compile or link a
+"work that uses the Library" with the Library to produce a work containing
+portions of the Library, and distribute that work under terms of your
+choice, provided that the terms permit modification of the work for the
+customer's own use and reverse engineering for debugging such modifications.
+
+You must give prominent notice with each copy of the work that the Library
+is used in it and that the Library and its use are covered by this License.
+You must supply a copy of this License. If the work during execution
+displays copyright notices, you must include the copyright notice for the
+Library among them, as well as a reference directing the user to the copy of
+this License. Also, you must do one of these things:
+
+ o a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood that
+ the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ o b) Accompany the work with a written offer, valid for at least
+ three years, to give the same user the materials specified in
+ Subsection 6a, above, for a charge no more than the cost of
+ performing this distribution.
+
+ o c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ o d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+For an executable, the required form of the "work that uses the Library"
+must include any data and utility programs needed for reproducing the
+executable from it. However, as a special exception, the source code
+distributed need not include anything that is normally distributed (in
+either source or binary form) with the major components (compiler, kernel,
+and so on) of the operating system on which the executable runs, unless that
+component itself accompanies the executable.
+
+It may happen that this requirement contradicts the license restrictions of
+other proprietary libraries that do not normally accompany the operating
+system. Such a contradiction means you cannot use both them and the Library
+together in an executable that you distribute.
+
+7. You may place library facilities that are a work based on the Library
+side-by-side in a single library together with other library facilities not
+covered by this License, and distribute such a combined library, provided
+that the separate distribution of the work based on the Library and of the
+other library facilities is otherwise permitted, and provided that you do
+these two things:
+
+ o a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ o b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+8. You may not copy, modify, sublicense, link with, or distribute the
+Library except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense, link with, or distribute the Library
+is void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under this
+License will not have their licenses terminated so long as such parties
+remain in full compliance.
+
+9. You are not required to accept this License, since you have not signed
+it. However, nothing else grants you permission to modify or distribute the
+Library or its derivative works. These actions are prohibited by law if you
+do not accept this License. Therefore, by modifying or distributing the
+Library (or any work based on the Library), you indicate your acceptance of
+this License to do so, and all its terms and conditions for copying,
+distributing or modifying the Library or works based on it.
+
+10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the original
+licensor to copy, distribute, link with or modify the Library subject to
+these terms and conditions. You may not impose any further restrictions on
+the recipients' exercise of the rights granted herein. You are not
+responsible for enforcing compliance by third parties to this License.
+
+11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot distribute so
+as to satisfy simultaneously your obligations under this License and any
+other pertinent obligations, then as a consequence you may not distribute
+the Library at all. For example, if a patent license would not permit
+royalty-free redistribution of the Library by all those who receive copies
+directly or indirectly through you, then the only way you could satisfy both
+it and this License would be to refrain entirely from distribution of the
+Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any patents
+or other property right claims or to contest validity of any such claims;
+this section has the sole purpose of protecting the integrity of the free
+software distribution system which is implemented by public license
+practices. Many people have made generous contributions to the wide range of
+software distributed through that system in reliance on consistent
+application of that system; it is up to the author/donor to decide if he or
+she is willing to distribute software through any other system and a
+licensee cannot impose that choice.
+
+This section is intended to make thoroughly clear what is believed to be a
+consequence of the rest of this License.
+
+12. If the distribution and/or use of the Library is restricted in certain
+countries either by patents or by copyrighted interfaces, the original
+copyright holder who places the Library under this License may add an
+explicit geographical distribution limitation excluding those countries, so
+that distribution is permitted only in or among countries not thus excluded.
+In such case, this License incorporates the limitation as if written in the
+body of this License.
+
+13. The Free Software Foundation may publish revised and/or new versions of
+the Library General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Library does not specify a license version
+number, you may choose any version ever published by the Free Software
+Foundation.
+
+14. If you wish to incorporate parts of the Library into other free programs
+whose distribution conditions are incompatible with these, write to the
+author to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals of
+preserving the free status of all derivatives of our free software and of
+promoting the sharing and reuse of software generally.
+
+NO WARRANTY
+
+15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
+THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO
+THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY
+PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
+CORRECTION.
+
+16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO
+LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR
+THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER
+SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License version 2 as published by the Free Software Foundation.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+/cg_can_to_mom.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/cg_mom_to_can.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/cgpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/divide_by_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/domain_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/enter_cts_evidence_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/enter_discrete_evidence_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/marginalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/multiply_by_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/multiply_pots.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/normalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/pot_to_marginal.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/set_domain_pot.m/1.1.1.1/Wed Jul 30 13:38:24 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@cgpot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+/normalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/set_domain_pot.m/1.1.1.1/Wed Jul 30 13:38:08 2003//
+/simple_marginalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@cgpot/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the CG potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+% Marginalize down to [], so that the normalizing constant becomes Pr(E)
+temp = marginalize_pot(cg_can_to_mom(pot), []);
+%loglik = temp.mom{1}.logp;
+[temp2, loglik] = normalize_pot(temp.mom{1});
+
+% Adjust scale factor to reflect the fact that the pot now represents Pr(X | E) instead of Pr(X,E).
+
+scale = -loglik;
+if 1
+switch pot.subtype
+ case 'm'
+ for i=1:pot.dsize
+ pot.mom{i} = rescale_pot(pot.mom{i}, scale);
+ end
+ case 'c'
+ for i=1:pot.dsize
+ pot.can{i} = rescale_pot(pot.can{i}, scale);
+ end
+end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/set_domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/set_domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function pot = set_domain_pot(pot, domain)
+% SET_DOMAIN_POT Change the domain of a potential (cgpot)
+% pot = set_domain_pot(pot, domain)
+
+delta = domain(1) - pot.domain(1);
+assert(all(domain == pot.domain + delta));
+pot.domain = pot.domain + delta;
+pot.ddom = pot.ddom + delta;
+pot.cdom = pot.cdom + delta;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/simple_marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/Old/simple_marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+function smallpot = marginalize_pot(bigpot, keep)
+% MARGINALIZE_POT Marginalize a cgpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, keep)
+
+sumover = mysetdiff(bigpot.domain, keep);
+csumover = myintersect(sumover, bigpot.cdom);
+dsumover = myintersect(sumover, bigpot.ddom);
+dkeep = myintersect(keep, bigpot.ddom);
+ckeep = myintersect(keep, bigpot.cdom);
+%ns = sparse(1, max(bigpot.domain)); % must be full, so I is an integer
+ns = zeros(1, max(bigpot.domain));
+ns(bigpot.ddom) = bigpot.dsizes;
+ns(bigpot.cdom) = bigpot.csizes;
+
+% sum(ns(csumover))==0 is like isempty(csumover) but handles observed nodes.
+% Similarly, prod(ns(dsumover))==1 is like isempty(dsumover)
+
+% Marginalize the cts parts.
+% If we are in canonical form, we stay that way, since moment form might not exist.
+% Besides, we would like to minimize the number of conversions.
+if sum(ns(csumover)) > 0
+ if bigpot.subtype == 'm'
+ for i=1:bigpot.dsize
+ bigpot.mom{i} = marginalize_pot(bigpot.mom{i}, ckeep);
+ end
+ else
+ for i=1:bigpot.dsize
+ bigpot.can{i} = marginalize_pot(bigpot.can{i}, ckeep);
+ end
+ end
+end
+
+% If we are not marginalizing over any discrete nodes, we are done.
+if prod(ns(dsumover))==1
+ smallpot = cgpot(dkeep, ckeep, ns, bigpot.can, bigpot.mom, bigpot.subtype);
+ return;
+end
+
+% To marginalize the discrete parts, we must be in moment form.
+bigpot = cg_can_to_mom(bigpot);
+
+I = prod(ns(dkeep));
+J = prod(ns(dsumover));
+C = sum(ns(ckeep));
+
+% Reshape bigpot into the form mu1(:,j,i), where i is in dkeep, j is in dsumover
+T1 = zeros(I,J);
+mu1 = zeros(C,J,I);
+Sigma1 = zeros(C,C,J,I);
+sum_map = find_equiv_posns(dsumover, bigpot.ddom);
+keep_map = find_equiv_posns(dkeep, bigpot.ddom);
+iv = zeros(1, length(bigpot.ddom)); % index vector
+for i=1:I
+ keep_iv = ind2subv(ns(dkeep), i);
+ iv(keep_map) = keep_iv;
+ for j=1:J
+ sum_iv = ind2subv(ns(dsumover), j);
+ iv(sum_map) = sum_iv;
+ k = subv2ind(ns(bigpot.ddom), iv);
+ mom = struct(bigpot.mom{k}); % violate object privacy
+ T1(i,j) = exp(mom.logp);
+ if C > 0 % so mu1 and Sigma1 are non-empty
+ mu1(:,j,i) = mom.mu;
+ Sigma1(:,:,j,i) = mom.Sigma;
+ end
+ end
+end
+
+% Collapse the mixture of Gaussians
+coef = mk_stochastic(T1); % coef must be convex combination
+T2 = sum(T1,2);
+T2 = T2 + (T2==0)*eps;
+%if C > 0, disp('collapsing onto '); disp(leep); end
+mu = [];
+Sigma = [];
+mom = cell(1,I);
+for i=1:I
+ if C > 0
+ [mu, Sigma] = collapse_mog(mu1(:,:,i), Sigma1(:,:,:,i), coef(i,:));
+ end
+ logp = log(T2(i));
+ mom{i} = mpot(ckeep, ns(ckeep), logp, mu, Sigma);
+end
+
+smallpot = cgpot(dkeep, ckeep, ns, [], mom, 'm');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/cg_can_to_mom.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/cg_can_to_mom.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function pot = cg_can_to_mom(pot)
+% CG_CAN_TO_MOM Convert a CG potential from canonical to moment form, if necessary.
+% pot = cg_can_to_mom(pot)
+
+if pot.subtype ~= 'm'
+ for i=1:pot.dsize
+ pot.mom{i} = cpot_to_mpot(pot.can{i});
+ end
+ pot.subtype = 'm';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/cg_mom_to_can.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/cg_mom_to_can.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function pot = cg_mom_to_can(pot)
+% CG_MOM_TO_CAN Convert a CG potential from moment to canonical form, if necessary.
+% pot = cg_mom_to_can(pot)
+
+if pot.subtype ~= 'c'
+ for i=1:pot.dsize
+ pot.can{i} = mpot_to_cpot(pot.mom{i});
+ end
+ pot.subtype = 'c';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/cgpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/cgpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function pot = cgpot(ddom, cdom, node_sizes, can, mom, subtype)
+% CPOT Make a canonical CG potential.
+% function pot = cgpot(ddom, cdom, node_sizes, can, mom, subtype)
+%
+% node_sizes(i) is the size of the i'th node.
+% can and mom default to 0s.
+% subtype defaults to 'c'.
+
+if nargin < 6, subtype = 'c'; end
+
+pot.ddom = ddom;
+pot.cdom = cdom;
+node_sizes = node_sizes(:)'; % row vectors print better
+pot.domain = myunion(ddom, cdom);
+pot.dsizes = node_sizes(pot.ddom);
+pot.dsize = prod(node_sizes(pot.ddom));
+pot.csizes = node_sizes(pot.cdom);
+pot.csize = sum(node_sizes(pot.cdom));
+pot.subtype = subtype;
+
+if nargin < 4
+ can = cell(1, pot.dsize);
+ for i=1:pot.dsize
+ can{i} = cpot(cdom, node_sizes(cdom));
+ end
+end
+pot.can = can;
+
+if nargin < 5
+ mom = cell(1, pot.dsize);
+ for i=1:pot.dsize
+ mom{i} = mpot(cdom, node_sizes(cdom));
+ end
+end
+pot.mom = mom;
+
+pot = class(pot, 'cgpot');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(pot)
+
+disp('conditional Gaussian potential object');
+disp(struct(pot));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/divide_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/divide_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function bigpot = divide_by_pot(bigpot, smallpot)
+% DIVIDE_BY_POT bigpot /= smallpot for cgpot
+% bigpot = divide_by_pot(bigpot, smallpot)
+%
+% smallpot's domain must be a subset of bigpot's domain.
+
+bigpot = cg_mom_to_can(bigpot);
+smallpot = cg_mom_to_can(smallpot);
+
+mask = find_equiv_posns(smallpot.ddom, bigpot.ddom);
+for i=1:bigpot.dsize
+ if isempty(smallpot.ddom)
+ src = 1;
+ else
+ sub = ind2subv(bigpot.dsizes, i);
+ src = subv2ind(smallpot.dsizes, sub(mask));
+ end
+ bigpot.can{i} = divide_by_pot(bigpot.can{i}, smallpot.can{src});
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function dom = domain_pot(pot)
+% DOMAIN_POT Return the domain of this cgpot.
+% dom = domain_pot(pot)
+
+dom = pot.domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/enter_cts_evidence_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/enter_cts_evidence_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,8 @@
+function pot = enter_cts_evidence_pot(pot, Y, y)
+% function pot = enter_cts_evidence_pot(pot, Y, y) cgpot
+
+
+pot = cg_mom_to_can(pot);
+for i=1:pot.dsize
+ pot.can{i} = enter_cts_evidence_pot(pot.can{i}, Y, y);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/enter_discrete_evidence_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/enter_discrete_evidence_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function pot = enter_discrete_evidence_pot(pot, Y, y)
+
+%ns = sparse(1, max(pot.domain));
+ns = zeros(1, max(pot.domain));
+ns(pot.ddom) = pot.dsizes;
+ns(pot.cdom) = pot.csizes;
+
+ddom = pot.ddom;
+S = prod(ns(ddom));
+sub = ind2subv(ns(ddom), 1:S);
+mask = find_equiv_posns(Y, ddom);
+sub(mask) = y;
+ndx = subv2ind(ns(ddom), sub);
+
+pot.can = pot.can(ndx);
+pot.mom = pot.mom(ndx);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,153 @@
+function smallpot = marginalize_pot(bigpot, keep, maximize, useC)
+% MARGINALIZE_POT Marginalize a cgpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, keep, maximize, useC)
+%
+% If maximize = 1, we raise an error.
+% useC is ignored.
+
+if nargin < 3, maximize = 0; end
+assert(~maximize);
+
+
+sumover = mysetdiff(bigpot.domain, keep);
+csumover = myintersect(sumover, bigpot.cdom);
+dsumover = myintersect(sumover, bigpot.ddom);
+dkeep = myintersect(keep, bigpot.ddom);
+ckeep = myintersect(keep, bigpot.cdom);
+%ns = sparse(1, max(bigpot.domain)); % must be full, so I is an integer
+ns = zeros(1, max(bigpot.domain));
+ns(bigpot.ddom) = bigpot.dsizes;
+ns(bigpot.cdom) = bigpot.csizes;
+
+% sum(ns(csumover))==0 is like isempty(csumover) but handles observed nodes.
+% Similarly, prod(ns(dsumover))==1 is like isempty(dsumover)
+
+% Marginalize the cts parts.
+% If we are in canonical form, we stay that way, since moment form might not exist.
+% Besides, we would like to minimize the number of conversions.
+if sum(ns(csumover)) > 0
+ if bigpot.subtype == 'm'
+ for i=1:bigpot.dsize
+ bigpot.mom{i} = marginalize_pot(bigpot.mom{i}, ckeep);
+ end
+ else
+ for i=1:bigpot.dsize
+ bigpot.can{i} = marginalize_pot(bigpot.can{i}, ckeep);
+ end
+ end
+end
+
+% If we are not marginalizing over any discrete nodes, we are done.
+if prod(ns(dsumover))==1
+ smallpot = cgpot(dkeep, ckeep, ns, bigpot.can, bigpot.mom, bigpot.subtype);
+ return;
+end
+
+% To marginalize the discrete parts, we partition the cts parts into those that depend
+% on dkeep (i) and those that depend on on dsumover (j).
+
+I = prod(ns(dkeep));
+J = prod(ns(dsumover));
+C = sum(ns(ckeep));
+sum_map = find_equiv_posns(dsumover, bigpot.ddom);
+keep_map = find_equiv_posns(dkeep, bigpot.ddom);
+iv = zeros(1, length(bigpot.ddom)); % index vector
+
+% If in canonical form, marginalize if possible, else convert to moment form.
+if 0 & bigpot.subtype == 'c'
+ p1 = zeros(I,J);
+ h1 = zeros(C,J,I);
+ K1 = zeros(C,C,J,I);
+ for i=1:I
+ keep_iv = ind2subv(ns(dkeep), i);
+ iv(keep_map) = keep_iv;
+ for j=1:J
+ sum_iv = ind2subv(ns(dsumover), j);
+ iv(sum_map) = sum_iv;
+ k = subv2ind(ns(bigpot.ddom), iv);
+ can = struct(bigpot.can{k}); % violate object privacy
+ p1(i,j) = exp(can.g);
+ if C > 0 % so mu1 and Sigma1 are non-empty
+ h1(:,j,i) = can.h;
+ K1(:,:,j,i) = can.K;
+ end
+ end
+ end
+
+ % If the cts parts do not depend on j, we can just marginalize the weighting coefficient g.
+ jdepends = 0;
+ for i=1:I
+ for j=2:J
+ if ~approxeq(h1(:,j,i), h1(:,1,i)) | ~approxeq(K1(:,:,j,i), K1(:,:,1,i))
+ jdepends = 1;
+ break
+ end
+ end
+ end
+
+ if ~jdepends
+ %g2 = log(sum(p1, 2));
+ g2 = zeros(I,1);
+ for i=1:I
+ s = sum(p1(i,:));
+ if s > 0
+ g2(i) = log(s);
+ end
+ end
+ h2 = h1;
+ K2 = K1;
+ can = cell(1,I);
+ j = 1; % arbitrary
+ for i=1:I
+ can{i} = cpot(ckeep, ns(ckeep), g2(i), h2(:,j,i), K2(:,:,j,i));
+ end
+ smallpot = cgpot(dkeep, ckeep, ns, can, [], 'c');
+ return;
+ else
+ % Since the cts parts depend on j, we must convert to moment form
+ bigpot = cg_can_to_mom(bigpot);
+ end
+end
+
+
+% Marginalize in moment form
+bigpot = cg_can_to_mom(bigpot);
+
+% Now partition the moment components.
+T1 = zeros(I,J);
+mu1 = zeros(C,J,I);
+Sigma1 = zeros(C,C,J,I);
+for i=1:I
+ keep_iv = ind2subv(ns(dkeep), i);
+ iv(keep_map) = keep_iv;
+ for j=1:J
+ sum_iv = ind2subv(ns(dsumover), j);
+ iv(sum_map) = sum_iv;
+ k = subv2ind(ns(bigpot.ddom), iv);
+ mom = struct(bigpot.mom{k}); % violate object privacy
+ T1(i,j) = exp(mom.logp);
+ if C > 0 % so mu1 and Sigma1 are non-empty
+ mu1(:,j,i) = mom.mu;
+ Sigma1(:,:,j,i) = mom.Sigma;
+ end
+ end
+end
+
+% Collapse the mixture of Gaussians
+coef = mk_stochastic(T1); % coef must be convex combination
+T2 = sum(T1,2);
+T2 = T2 + (T2==0)*eps;
+%if C > 0, disp('collapsing onto '); disp(leep); end
+mu = [];
+Sigma = [];
+mom = cell(1,I);
+for i=1:I
+ if C > 0
+ [mu, Sigma] = collapse_mog(mu1(:,:,i), Sigma1(:,:,:,i), coef(i,:));
+ end
+ logp = log(T2(i));
+ mom{i} = mpot(ckeep, ns(ckeep), logp, mu, Sigma);
+end
+
+smallpot = cgpot(dkeep, ckeep, ns, [], mom, 'm');
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/multiply_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/multiply_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function bigpot = multiply_by_pot(bigpot, smallpot, varargin)
+% MULTIPLY_BY_POT bigpot *= smallpot for cgpot
+% bigpot = multiply_by_pot(bigpot, smallpot)
+%
+% smallpot's domain must be a subset of bigpot's domain.
+
+bigpot = cg_mom_to_can(bigpot);
+smallpot = cg_mom_to_can(smallpot);
+
+mask = find_equiv_posns(smallpot.ddom, bigpot.ddom);
+for i=1:bigpot.dsize
+ if isempty(smallpot.ddom)
+ src = 1;
+ else
+ sub = ind2subv(bigpot.dsizes, i);
+ src = subv2ind(smallpot.dsizes, sub(mask));
+ end
+ bigpot.can{i} = multiply_by_pot(bigpot.can{i}, smallpot.can{src});
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/multiply_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/multiply_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function T = multiply_pots(T1, T2)
+% MULTIPLY_POTS Multiply a pair of dpots together pointwise (cgpot)
+% T = multiply_pots(pots)
+
+ddom = myunion(T1.ddom, T2.ddom);
+cdom = myunion(T1.cdom, T2.cdom);
+dom = myunion(ddom, cdom);
+ns = zeros(1, max(dom));
+ns(T1.ddom) = T1.dsizes;
+ns(T2.ddom) = T2.dsizes;
+ns(T1.cdom) = T1.csizes;
+ns(T2.cdom) = T2.csizes;
+
+T = cgpot(ddom, cdom, ns);
+T = multiply_by_pot(T, T1);
+T = multiply_by_pot(T, T2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the CG potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+% Marginalize down to [], so that the normalizing constant becomes Pr(E)
+temp = cg_can_to_mom(marginalize_pot(pot, []));
+%loglik = temp.mom{1}.logp;
+[temp2, loglik] = normalize_pot(temp.mom{1});
+
+% Adjust scale factor to reflect the fact that the pot now represents Pr(X | E) instead of Pr(X,E).
+
+scale = -loglik;
+if 1
+switch pot.subtype
+ case 'm'
+ for i=1:pot.dsize
+ pot.mom{i} = rescale_pot(pot.mom{i}, scale);
+ end
+ case 'c'
+ for i=1:pot.dsize
+ pot.can{i} = rescale_pot(pot.can{i}, scale);
+ end
+end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/pot_to_marginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/pot_to_marginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function m = pot_to_marginal(pot)
+% POT_TO_MARGINAL Convert a cgpot to a marginal structure.
+% m = pot_to_marginal(pot)
+
+pot = cg_can_to_mom(pot);
+m.domain = pot.domain;
+n = pot.csize;
+d = length(pot.mom);
+if n==0
+ m.mu = [];
+ m.Sigma = [];
+else
+ m.mu = zeros(n, d);
+ m.Sigma = zeros(n, n, d);
+end
+m.T = 0*myones(pot.dsizes);
+for i=1:pot.dsize
+ s = struct(pot.mom{i}); % violate privacy of object
+ if n > 0
+ m.mu(:,i) = s.mu;
+ m.Sigma(:,:,i) = s.Sigma;
+ end
+ m.T(i) = exp(s.logp);
+end
+if isvectorBNT(m.T)
+ m.T = m.T(:)';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/set_domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cgpot/set_domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function pot = set_domain_pot(pot, domain)
+% SET_DOMAIN_POT Change the domain of a potential (cgpot)
+% pot = set_domain_pot(pot, domain)
+
+delta = domain(1) - pot.domain(1);
+assert(all(domain == pot.domain + delta));
+pot.domain = pot.domain + delta;
+pot.ddom = pot.ddom + delta;
+pot.cdom = pot.cdom + delta;
+cdomain = pot.cdom;
+n = prod(pot.dsizes);
+if(pot.subtype == 'm')
+ for i = 1: n
+ pot.mom{i} = set_domain_pot(pot.mom{i}, cdomain);
+ end
+end
+if(pot.subtype == 'c')
+ for i = 1: n
+ pot.can{i} = set_domain_pot(pot.can{i}, cdomain);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+/cpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/cpot_to_mpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/divide_by_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/domain_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/enter_cts_evidence_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/marginalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/multiply_by_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/multiply_pots.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/normalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/pot_to_marginal.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/rescale_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/set_domain_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+A D/Old////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@cpot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+/cpot_to_mpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/normalize_pot.convert.m/1.1.1.1/Wed May 29 15:59:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@cpot/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/cpot_to_mpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/cpot_to_mpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function mom = cpot_to_mpot(can)
+% CPOT_TO_MPOT Convert a canonical potential to moment form.
+% mom = cpot_to_mpot(can)
+
+[logp, mu, Sigma] = canonical_to_moment(can.g, can.h, can.K);
+mom = mpot(can.domain, can.sizes, logp, mu, Sigma);
+
+%%%%%%%
+
+function [logp, mu, Sigma] = canonical_to_moment(g, h, K)
+% CANONICAL_TO_MOMENT Convert canonical characteristics to moment form.
+% [logp, mu, Sigma] = canonical_to_moment(g, h, K)
+
+if det(K)==0
+  Sigma = inf*ones(size(K)); % an all-Inf matrix the size of K (inf*size(K) would give the size vector, not a matrix)
+else
+ Sigma = inv(K);
+end
+mu = Sigma*h;
+n = length(mu);
+if isempty(mu)
+ logp = g - 0.5*(log(det(K)) - n*log(2*pi));
+else
+ logp = g - 0.5*(log(det(K)) - n*log(2*pi) - mu'*K*mu);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/normalize_pot.convert.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/Old/normalize_pot.convert.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function [mom2, loglik] = normalize_pot(can)
+% NORMALIZE_POT Convert the canonical potential Pr(X,E) into moment potential Pr(X|E) and return log Pr(E).
+% [mom, loglik] = normalize_pot(can)
+
+mom = cpot_to_mpot(can);
+mom = struct(mom); % violate privacy of object
+loglik = mom.logp;
+%mom.logp = 0; % now represents Pr(X | E) instead of Pr(X, E).
+mom2 = mpot(mom.domain, mom.sizes, 0, mom.mu, mom.Sigma);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/cpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/cpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function pot = cpot(members, sizes, g, h, K)
+% CPOT Make a canonical Gaussian potential.
+% pot = cpot(members, sizes, g, h, K)
+%
+% All params default to 0 if omitted.
+
+n = sum(sizes);
+if nargin < 3, g = 0; end
+if nargin < 4, h = zeros(n,1); end
+if nargin < 5, K = zeros(n,n); end
+
+pot.domain = members;
+pot.sizes = sizes(:)';
+pot.g = g;
+pot.h = h;
+pot.K = K;
+pot = class(pot, 'cpot');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/cpot_to_mpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/cpot_to_mpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function mom = cpot_to_mpot(can)
+% CPOT_TO_MPOT Convert a canonical potential to moment form.
+% mom = cpot_to_mpot(can)
+
+[logp, mu, Sigma] = canonical_to_moment(can.g, can.h, can.K);
+mom = mpot(can.domain, can.sizes, logp, mu, Sigma);
+
+%%%%%%%
+
+function [logp, mu, Sigma] = canonical_to_moment(g, h, K)
+% CANONICAL_TO_MOMENT Convert canonical characteristics to moment form.
+% [logp, mu, Sigma] = canonical_to_moment(g, h, K)
+
+n = length(K);
+if isempty(K)
+ logp = g - 0.5*(log(det(K)) - n*log(2*pi));
+ Sigma = [];
+ mu = [];
+else
+ if det(K)==0
+ Sigma = inf*ones(n,n);
+ mu = zeros(n,1); % if the precision is zero, the mean is arbitrary
+ logp = g; % the scaling factor for the uniform distribution is 1
+ else
+ Sigma = inv(K);
+ mu = Sigma*h;
+ logp = g - 0.5*(log(det(K)) - n*log(2*pi) - mu'*K*mu);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(pot)
+
+disp('canonical potential object');
+disp(struct(pot));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/divide_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/divide_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function bigpot = divide_by_pot(bigpot, smallpot)
+% DIVIDE_BY_POT bigpot /= smallpot for cpot
+% bigpot = divide_by_pot(bigpot, smallpot)
+%
+% smallpot's domain must be a subset of bigpot's domain.
+
+bigpot.g = bigpot.g - smallpot.g;
+if sum(smallpot.sizes) > 0
+ mask = find_equiv_posns(smallpot.domain, bigpot.domain);
+ u = block(mask, bigpot.sizes);
+ bigpot.h(u) = bigpot.h(u) - smallpot.h;
+ bigpot.K(u, u) = bigpot.K(u, u) - smallpot.K;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function dom = domain_pot(pot)
+% DOMAIN_POT Return the domain of this cpot.
+% dom = domain_pot(pot)
+
+dom = pot.domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/enter_cts_evidence_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/enter_cts_evidence_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function pot = enter_cts_evidence_pot(pot, Y, y)
+% function pot = enter_cts_evidence_pot(pot, Y, y) (cpot)
+
+ns = sparse(1, max(pot.domain));
+ns(pot.domain) = pot.sizes;
+
+X = mysetdiff(pot.domain, Y);
+[hx, hy, KXX, KXY, KYX, KYY] = partition_matrix_vec(pot.h, pot.K, X, Y, ns);
+pot.g = pot.g + hy'*y - 0.5*y'*KYY*y;
+if ~isempty(X)
+ pot.h = hx - KXY*y;
+ pot.K = KXX;
+end
+
+pot.sizes(find_equiv_posns(Y,pot.domain)) = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function smallpot = marginalize_pot(bigpot, keep, maximize, useC)
+% MARGINALIZE_POT Marginalize a cpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, keep, maximize, useC)
+%
+% The maximize argument is ignored - maxing out a Gaussian is the same as summing it out,
+% since the mode and mean are equal.
+% The useC argument is ignored.
+
+node_sizes = sparse(1, max(bigpot.domain));
+node_sizes(bigpot.domain) = bigpot.sizes;
+sum_over = mysetdiff(bigpot.domain, keep);
+
+if sum(node_sizes(sum_over))==0 % isempty(sum_over)
+ %smallpot = bigpot;
+ smallpot = cpot(keep, node_sizes(keep), bigpot.g, bigpot.h, bigpot.K);
+else
+ [h1, h2, K11, K12, K21, K22] = partition_matrix_vec(bigpot.h, bigpot.K, sum_over, keep, node_sizes);
+ n = length(h1);
+ K11inv = inv(K11);
+ g = bigpot.g + 0.5*(n*log(2*pi) - log(det(K11)) + h1'*K11inv*h1);
+ if length(h2) > 0 % ~isempty(keep) % we are are actually keeping something
+ A = K21*K11inv;
+ h = h2 - A*h1;
+ K = K22 - A*K12;
+ else
+ h = [];
+ K = [];
+ end
+ smallpot = cpot(keep, node_sizes(keep), g, h, K);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/multiply_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/multiply_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function bigpot = multiply_by_pot(bigpot, smallpot, varargin)
+% MULTIPLY_BY_POT bigpot *= smallpot for cpot
+% bigpot = multiply_by_pot(bigpot, smallpot)
+%
+% smallpot's domain must be a subset of bigpot's domain.
+
+bigpot.g = bigpot.g + smallpot.g;
+if sum(smallpot.sizes) > 0
+ mask = find_equiv_posns(smallpot.domain, bigpot.domain);
+ u = block(mask, bigpot.sizes);
+ bigpot.h(u) = bigpot.h(u) + smallpot.h;
+ bigpot.K(u, u) = bigpot.K(u, u) + smallpot.K;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/multiply_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/multiply_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function T = multiply_pots(T1, T2)
+% MULTIPLY_POTS Multiply a pair of cpots together pointwise.
+% T = multiply_pots(T1, T2)
+
+dom = myunion(T1.domain, T2.domain);
+ns = sparse(1, max(dom));
+ns(T1.domain) = T1.sizes;
+ns(T2.domain) = T2.sizes;
+T = cpot(dom, ns(dom));
+T = multiply_by_pot(T, T1);
+T = multiply_by_pot(T, T2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the canonical potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+mom = cpot_to_mpot(pot); % move the normalizing constant out of g, to reveal the coefficient
+%loglik = scaling_factor_pot(mom);
+%loglik = mom.logp;
+[temp, loglik] = normalize_pot(mom);
+pot.g = pot.g - loglik;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/pot_to_marginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/pot_to_marginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function m = pot_to_marginal(pot)
+% POT_TO_MARGINAL Convert a cpot to a marginal structure.
+% m = pot_to_marginal(pot)
+
+mom = cpot_to_mpot(pot);
+m = pot_to_marginal(mom);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/rescale_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/rescale_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function pot = rescale_pot(pot, s)
+% RESCALE_POT Add a constant to the cpot scale factor.
+% pot = rescale_pot(pot, s)
+
+pot.g = pot.g + s;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/set_domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@cpot/set_domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function pot = set_domain_pot(pot, domain)
+% SET_DOMAIN_POT Change the domain of a potential (cpot)
+% pot = set_domain_pot(pot, domain)
+
+pot.domain = domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+/approxeq_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/divide_by_pot.m/1.1.1.1/Thu Aug 5 15:25:08 2004//
+/domain_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/dpot.m/1.1.1.1/Tue Oct 1 19:04:44 2002//
+/dpot_to_table.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/find_most_prob_entry.m/1.1.1.1/Sun Jun 16 19:06:20 2002//
+/get_fields.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/marginalize_pot.m/1.1.1.1/Wed Aug 4 19:59:14 2004//
+/multiply_by_pot.m/1.1.1.1/Wed Aug 4 19:59:14 2004//
+/multiply_pots.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/normalize_pot.m/1.1.1.1/Wed Aug 4 15:54:48 2004//
+/pot_to_marginal.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/set_domain_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/subsasgn.m/1.1.1.1/Wed Apr 27 18:34:48 2005//
+/subsref.m/1.1.1.1/Wed Apr 27 18:34:48 2005//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@dpot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/approxeq_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/approxeq_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function p = approxeq_pot(A, B, tol)
+
+if nargin < 3, tol = 1e-3; end
+
+p = approxeq(A.T, B.T, tol);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(pot)
+
+disp('discrete potential object');
+disp(struct(pot));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/divide_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/divide_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,14 @@
+function Tbig = divide_by_pot(Tbig, Tsmall)
+% DIVIDE_BY_POT Tbig /= Tsmall
+% Tbig = divide_by_pot(Tbig, Tsmall)
+%
+% Tsmall's domain must be a subset of Tbig's domain.
+
+% Process sparse dpots; the case where only one of the two pots is sparse is not handled.
+if issparse(Tbig.T) & issparse(Tsmall.T)
+ Tbig.T = divide_by_sparse_table(Tbig.T, Tbig.domain, Tbig.sizes, Tsmall.T, Tsmall.domain, Tsmall.sizes);
+else
+ Tbig.T = divide_by_table(Tbig.T, Tbig.domain, Tbig.sizes, Tsmall.T, Tsmall.domain, Tsmall.sizes);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function dom = domain_pot(pot)
+% DOMAIN_POT Return the domain of this dpot.
+% dom = domain_pot(pot)
+
+dom = pot.domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/dpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/dpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function pot = dpot(domain, sizes, T)
+% DPOT Make a discrete (sparse) potential.
+% pot = dpot(domain, sizes, T, spar)
+%
+% sizes(i) is the size of the i'th domain element.
+% T defaults to all 1s.
+
+%assert(length(sizes) == length(domain));
+
+pot.domain = domain(:)'; % so we can see it when we display
+if nargin < 3
+ pot.T = myones(sizes);
+ %pot.T = ones(1,prod(sizes)); % 1D vector
+else
+ if isempty(T)
+ pot.T = [];
+ else
+ if issparse(T)
+ pot.T = T;
+ else
+ pot.T = myreshape(T, sizes);
+ end
+ end
+end
+pot.sizes = sizes(:)';
+pot = class(pot, 'dpot');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/dpot_to_table.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/dpot_to_table.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function T = dpot_to_table(pot)
+
+T = pot.T;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/find_most_prob_entry.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/find_most_prob_entry.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+function [indices, pot] = find_most_prob_entry(pot)
+% function [indices, pot] = find_most_prob_entry(pot)
+% function [indices, pot] = find_most_prob_entry(pot)
+% Find the indices of the argmax, and set all other entries to 0.
+
+%indices = argmax(pot.T);
+[m i] = max(pot.T(:));
+indices = ind2subv(pot.sizes, i);
+pot.T = 0*myones(pot.sizes);
+pot.T(i) = m;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/get_fields.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/get_fields.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function val = get_params(pot, name)
+% GET_PARAMS Accessor function for a field (dpot)
+% val = get_params(pot, name)
+%
+% e.g., get_params(pot, 'table') or 'domain'
+
+switch name
+ case 'table', val = pot.T;
+ case 'domain', val = pot.domain;
+ otherwise,
+ error(['invalid field name ' name]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function smallpot = marginalize_pot(bigpot, onto, maximize)
+% MARGINALIZE_POT Marginalize a dpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, onto, maximize)
+%
+% 'onto' must be in ascending order.
+
+if nargin < 3, maximize = 0; end
+
+ns = zeros(1, max(bigpot.domain));
+ns(bigpot.domain) = bigpot.sizes;
+%assert(isequal(bigpot.sizes, mysize(bigpot.T))); % may fail if there are trailing dimensions of size 1
+if issparse(bigpot.T)
+ smallT = marg_sparse_table(bigpot.T, bigpot.domain, bigpot.sizes, onto, maximize);
+else
+ smallT = marg_table(bigpot.T, bigpot.domain, bigpot.sizes, onto, maximize);
+end
+smallpot = dpot(onto, ns(onto), smallT);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/multiply_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/multiply_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function Tbig = multiply_by_pot(Tbig, Tsmall)
+% MULTIPLY_BY_POT Tbig *= Tsmall
+% Tbig = multiply_by_pot(Tbig, Tsmall)
+%
+% Tsmall's domain must be a subset of Tbig's domain.
+
+% Process sparse dpots; the case where only one of the two pots is sparse is not handled.
+if issparse(Tbig.T) & issparse(Tsmall.T)
+ Tbig.T = mult_by_sparse_table(Tbig.T, Tbig.domain, Tbig.sizes, Tsmall.T, Tsmall.domain, Tsmall.sizes);
+else
+ Tbig.T = mult_by_table(Tbig.T, Tbig.domain, Tbig.sizes, Tsmall.T, Tsmall.domain, Tsmall.sizes);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/multiply_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/multiply_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function T = multiply_pots(T1, T2)
+% MULTIPLY_POTS Multiply a pair of dpots together pointwise.
+% T = multiply_pots(T1, T2)
+
+dom = myunion(T1.domain, T2.domain);
+%ns = sparse(1, max(dom)); % causes problems in myreshape on NT
+ns = zeros(1, max(dom));
+ns(T1.domain) = T1.sizes;
+ns(T2.domain) = T2.sizes;
+T = dpot(dom, ns(dom));
+T = multiply_by_pot(T, T1);
+T = multiply_by_pot(T, T2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the discrete potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+if isempty(pot.T) %add to process sparse
+ loglik = 0;
+ return;
+end
+[pot.T, lik] = normalise(pot.T);
+loglik = log(lik + (lik==0)*eps);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/pot_to_marginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/pot_to_marginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function m = pot_to_marginal(pot)
+% POT_TO_MARGINAL Convert a dpot to a marginal structure.
+% m = pot_to_marginal(pot)
+
+m.domain = pot.domain;
+m.T = pot.T;
+m.mu = [];
+m.Sigma = [];
+
+%if isvector(m.T)
+% m.T = m.T(:);
+%end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/set_domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/set_domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function pot = set_domain_pot(pot, domain)
+% SET_DOMAIN_POT Change the domain of a potential (dpot)
+% pot = set_domain_pot(pot, domain)
+
+pot.domain = domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/subsasgn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/subsasgn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function B = subsasgn(A, S, B)
+
+B = builtin('subsasgn', A, S, B);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/subsref.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@dpot/subsref.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3 @@
+function B = subsref(A, S)
+
+B = builtin('subsref', A, S);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+/display.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/marginalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/mpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/mpot_to_cpot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/normalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/pot_to_marginal.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/rescale_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/set_domain_pot.m/1.1.1.1/Wed Jul 30 13:37:52 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@mpot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(pot)
+
+disp('moment Gaussian potential object');
+disp(struct(pot));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function smallpot = marginalize_pot(bigpot, keep, maximize, useC)
+% MARGINALIZE_POT Marginalize a mpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, keep, maximize, useC)
+%
+% The maximize argument is ignored - maxing out a Gaussian is the same as summing it out,
+% since the mode and mean are equal.
+% The useC argument is ignored.
+
+
+node_sizes = sparse(1, max(bigpot.domain));
+node_sizes(bigpot.domain) = bigpot.sizes;
+sum_over = mysetdiff(bigpot.domain, keep);
+
+[logp, mu, Sigma] = marginalize_gaussian(bigpot.logp, bigpot.mu, bigpot.Sigma, ...
+ keep, sum_over, node_sizes);
+smallpot = mpot(keep, node_sizes(keep), logp, mu, Sigma);
+
+%%%%%%
+
+function [logpX, muX, SXX] = marginalize_gaussian(logp, mu, Sigma, X, Y, ns)
+% MARGINALIZE_GAUSSIAN Compute Pr(X) from Pr(X,Y) where X and Y are jointly Gaussian.
+% [logpX, muX, SXX] = marginalize_gaussian(logp, mu, Sigma, X, Y, ns)
+%
+% sizes(i) is the size of the i'th block in domain.
+
+[muX, muY, SXX, SXY, SYX, SYY] = partition_matrix_vec(mu, Sigma, X, Y, ns);
+logpX = logp; % Lauritzen (1996) p161
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/mpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/mpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function pot = mpot(members, sizes, logp, mu, Sigma)
+% MPOT Make a moment Gaussian potential.
+% pot = mpot(members, sizes, logp, mu, Sigma)
+%
+% All params default to 0 if omitted.
+
+n = sum(sizes);
+if nargin < 3, logp = 0; end
+if nargin < 4, mu = zeros(n,1); end
+if nargin < 5, Sigma = zeros(n,n); end
+
+pot.domain = members;
+pot.sizes = sizes;
+pot.logp = logp;
+pot.mu = mu;
+pot.Sigma = Sigma;
+pot = class(pot, 'mpot');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/mpot_to_cpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/mpot_to_cpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function can = mpot_to_cpot(mom)
+% MPOT_TO_CPOT Convert a moment potential to canonical form.
+% can = mpot_to_cpot(mom)
+
+[g, h, K] = moment_to_canonical(mom.logp, mom.mu, mom.Sigma);
+can = cpot(mom.domain, mom.sizes, g, h, K);
+
+%%%%%%%%%%%
+
+function [g, h, K] = moment_to_canonical(logp, mu, Sigma)
+% MOMENT_TO_CANONICAL Convert moment characteristics to canonical form.
+% [g, h, K] = moment_to_canonical(logp, mu, Sigma)
+
+K = inv(Sigma);
+h = K*mu;
+n = length(K);
+if isempty(mu)
+ g = logp + 0.5*(log(det(K)) - n*log(2*pi));
+else
+ g = logp + 0.5*(log(det(K)) - n*log(2*pi) - mu'*K*mu);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the moment potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+loglik = pot.logp;
+pot.logp = 0;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/pot_to_marginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/pot_to_marginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function m = pot_to_marginal(pot)
+% POT_TO_MARGINAL Convert a mpot to a marginal structure.
+% m = pot_to_marginal(pot)
+
+m.domain = pot.domain;
+m.T = exp(pot.logp);
+m.mu = pot.mu;
+m.Sigma = pot.Sigma;
+
+if isvectorBNT(m.T)
+ m.T = m.T(:)';
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/rescale_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/rescale_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function pot = rescale_pot(pot, s)
+% RESCALE_POT Add a constant to the mpot scale factor.
+% pot = rescale_pot(pot, s)
+
+pot.logp = pot.logp + s;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/set_domain_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@mpot/set_domain_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function pot = set_domain_pot(pot, domain)
+% SET_DOMAIN_POT Change the domain of a potential (mpot)
+% pot = set_domain_pot(pot, domain)
+
+pot.domain = domain;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+/marginalize_pot.m/1.1.1.1/Sun May 19 22:11:08 2002//
+/normalize_pot.m/1.1.1.1/Sun May 19 22:11:08 2002//
+/reduce_pot.m/1.1.1.1/Tue Mar 11 17:37:02 2003//
+/rescale_pot.m/1.1.1.1/Sun May 19 22:11:08 2002//
+/scgcpot.m/1.1.1.1/Sun May 19 22:11:08 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@scgcpot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+function smallpot = marginalize_pot(bigpot, keepdom, sumoverdom, nodesizes)
+% MARGINALIZE_POT Marginalize a scgcpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, keep)
+
+keepsize = sum(nodesizes(keepdom));
+[A1, A2, B1, B2, C11, C12, C21, C22] = partition_matrix_vec_3(bigpot.A, bigpot.B, bigpot.C, keepdom, sumoverdom, nodesizes);
+smallpot = scgcpot(keepsize, bigpot.ctailsize, bigpot.p, A1, B1, C11);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,6 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the element of stable conditional gaussian potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+loglik = log(pot.p);
+pot.p = 1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/reduce_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/reduce_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function [reduced_pot,successful] = reduce(pot,tailnodes)
+% Executes the reduce operation defined in
+% Stable Local Computation with Conditional Gaussian Distributions
+% Steffen L. Lauritzen
+% Frank Jensen
+% September 1999
+% The potential pot is reduced if B contains any zero columns
+% The tests are restricted to the positions in tailnodes.
+% Any columns successfully deleted are entered in the array successful
+
+if nargin < 2
+ tailnodes = 1:pot.ctailsize;
+end
+
+successful = [];
+
+% Look for all columns being equal to zero
+for i = tailnodes
+ if ~any(pot.B(:,i))
+ successful = [successful i];
+ end
+end
+
+remain = mysetdiff(1:pot.ctailsize,successful);
+
+% Erase the zero-columns and decrease the tailsize
+pot.B = pot.B(:,remain);
+pot.ctailsize = pot.ctailsize - length(successful);
+
+% Return the reduced potential
+reduced_pot = pot;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/rescale_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/rescale_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function pot = rescale_pot(pot, s)
+% RESCALE_POT Multiply the scgcpot scale factor by a constant.
+% pot = rescale_pot(pot, s)
+
+pot.p = pot.p*s;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/scgcpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgcpot/scgcpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function pot = scgcpot(cheadsize, ctailsize, p, A, B, C)
+% SCGCPOT Make a base object of stable conditional gaussian potential.
+% pot = scgcpot(cheadsize, ctailsize, p, A, B, C)
+%
+% cheadsize is the dimension of head nodes.
+% ctailsize is the dimension of tail nodes.
+% r = cheadsize, s = ctailsize
+% p is discrete probability.
+% A is table of r*1 vectors;
+% B is r*s matrices
+% C is r*r positive semidefinite symmetric matrices
+
+if nargin < 3
+ p = 1;
+end
+if nargin < 4
+ A = zeros(cheadsize,1);
+end
+if nargin < 5
+ B = zeros(cheadsize,ctailsize);
+end
+if nargin < 6
+ C = zeros(cheadsize,cheadsize);
+end
+
+if isempty(A)
+ A = zeros(cheadsize,1);
+end
+if isempty(B)
+ B = zeros(cheadsize,ctailsize);
+end
+if isempty(C)
+ C = zeros(cheadsize,cheadsize);
+end
+
+pot.cheadsize = cheadsize;
+pot.ctailsize = ctailsize;
+
+pot.p = p;
+pot.A = A;
+pot.B = B;
+pot.C = C;
+%if cheadsize == 0
+% pot.A = [];
+%end
+%if ctailsize == 0
+% pot.B = [];
+%end
+pot = class(pot, 'scgcpot');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+/README/1.1.1.1/Thu Mar 20 15:07:16 2003//
+/combine_pots.m/1.1.1.1/Tue Mar 11 17:49:28 2003//
+/complement_pot.m/1.1.1.1/Wed May 21 13:49:34 2003//
+/direct_combine_pots.m/1.1.1.1/Sun May 19 22:11:08 2002//
+/extension_pot.m/1.1.1.1/Fri Jan 24 12:52:34 2003//
+/marginalize_pot.m/1.1.1.1/Tue Mar 11 17:06:08 2003//
+/normalize_pot.m/1.1.1.1/Wed May 21 13:49:44 2003//
+/pot_to_marginal.m/1.1.1.1/Sun May 19 22:11:08 2002//
+/recursive_combine_pots.m/1.1.1.1/Wed May 21 13:49:48 2003//
+/reduce_pot.m/1.1.1.1/Tue Mar 11 18:07:12 2003//
+/scgpot.m/1.1.1.1/Tue Mar 11 14:04:48 2003//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@scgpot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+% Stable conditional Gaussian inference
+% Written by Rainer Deventer
+
+
+@techreport{Lauritzen99,
+ author = "S. Lauritzen and F. Jensen",
+ title = "Stable Local Computation with Conditional {G}aussian Distributions",
+ year = 1999,
+ number = "R-99-2014",
+ institution = "Dept. Math. Sciences, Aalborg Univ."
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/combine_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/combine_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function pot = combine_pots(pot1, pot2)
+% COMBINE_POTS combine two potentials
+% pot = combine_pots(pot1, pot2)
+
+% Reduce both potentials before trying to combine them.
+% Cf. "Stable Local computation with Conditional Gaussian Distributions", page 9
+% Consider again two potentials with minimal tail
+
+% Guarantee minimal tails. If pot1 or pot2 are minimal, they are not changed
+pot1 = reduce_pot(pot1);
+pot2 = reduce_pot(pot2);
+
+% if the head domains of the two potentials intersect, the combination is undefined
+if ~isempty( myintersect(pot1.cheaddom, pot2.cheaddom) )
+ return;
+end
+
+if isempty( myintersect(pot1.domain, pot2.cheaddom) ) | isempty( myintersect(pot2.domain, pot1.cheaddom))
+ % if satisfy the condition of directed combine
+ pot = direct_combine_pots(pot1, pot2);
+else
+ % perform recursive combine
+ pot = recursive_combine_pots(pot1, pot2);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/complement_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/complement_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,221 @@
+function [margpot, comppot] = complement_pot(pot, keep)
+% COMPLEMENT_POT decompose a potential into its strong marginal and its
+% complement; this corresponds exactly to the decomposition of a probability
+% distribution into its marginal and conditional
+% [margpot, comppot] = complement_pot(pot, keep)
+
+% keep can only include continuous head nodes and discrete nodes
+% margpot is the stable CG potential of keep nodes
+% comppot is the stable CG potential of the remaining nodes; this corresponds
+% exactly to the decomposition of a probability distribution into its marginal and conditional
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Calculation of the marginal requires integration over %
+% all variables in csumover. Thus cheadkeep contains all %
+% continuous variables in the marginal potential %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%keyboard;
+csumover = mysetdiff(pot.cheaddom, keep);
+cheadkeep = mysetdiff(pot.cheaddom, csumover);
+
+nodesizes = zeros(1, max(pot.domain));
+nodesizes(pot.ddom) = pot.dsizes;
+nodesizes(pot.cheaddom) = pot.cheadsizes;
+nodesizes(pot.ctaildom) = pot.ctailsizes;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Description of the variables in the marginal domain %
+% For the calculation of a strong marginal first integration %
+% over all continuous variables in the head takes place. %
+% The calculation of the marginal over the head variables %
+% might result in a smaller or empty tail %
+% If there are no head variables, and therefore no tail %
+% variables, left marginalisation over discrete variables %
+% may take place %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+margdom = mysetdiff(pot.domain,keep);
+% margddom = pot.ddom;
+margcheaddom = cheadkeep;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Marginalisation over discrete variables is only allowed when %
+% the tail is empty %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+margddom = myintersect(pot.ddom,keep); % Discrete domain of marginal
+margctaildom = myintersect(pot.ctaildom,keep); % Tail domain
+assert(isempty(mysetdiff(pot.ddom,margddom)) | isempty(margctaildom))
+
+
+%margctaildom = pot.ctaildom;
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Even if marginalisation over continuous variables is only defined %
+% for head variables, the marginalisation over head variables might   %
+% result in a smaller tail %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+margctaildom = myintersect(pot.ctaildom,keep);
+
+margcheadsizes = nodesizes(margcheaddom);
+margcheadsize = sum(margcheadsizes);
+margctailsizes = nodesizes(margctaildom);
+margctailsize = sum(margctailsizes);
+
+compdom = pot.domain;
+compddom = pot.ddom;
+compcheaddom = csumover;
+compctaildom = myunion(pot.ctaildom, cheadkeep);
+compcheadsizes = nodesizes(compcheaddom);
+compcheadsize = sum(compcheadsizes);
+compctailsizes = nodesizes(compctaildom);
+compctailsize = sum(compctailsizes);
+
+dkeep = myintersect(pot.ddom, keep);
+%if dom is only contain discrete node
+if isempty(pot.cheaddom)
+ dsumover = mysetdiff(pot.ddom, dkeep);
+
+ if isempty(dsumover)
+ margpot = pot;
+ comppot = scgpot([], [], [], []);
+ return;
+ end
+
+
+ I = prod(nodesizes(dkeep));
+ J = prod(nodesizes(dsumover));
+ sum_map = find_equiv_posns(dsumover, pot.ddom);
+ keep_map = find_equiv_posns(dkeep, pot.ddom);
+ iv = zeros(1, length(pot.ddom)); % index vector
+ p1 = zeros(I,J);
+ for i=1:I
+ keep_iv = ind2subv(nodesizes(dkeep), i);
+ iv(keep_map) = keep_iv;
+ for j=1:J
+ sum_iv = ind2subv(nodesizes(dsumover), j);
+ iv(sum_map) = sum_iv;
+ k = subv2ind(nodesizes(pot.ddom), iv);
+ potc = struct(pot.scgpotc{k}); % violate object privacy
+ p1(i,j) = potc.p;
+ end
+ end
+ p2 = sum(p1,2);
+ p2 = p2 + (p2==0)*eps;
+
+ margscpot = cell(1, I);
+ compscpot = cell(1, I*J);
+ iv = zeros(1, length(pot.ddom)); % index vector
+ for i=1:I
+ margscpot{i} = scgcpot(0, 0, p2(i));
+ keep_iv = ind2subv(nodesizes(dkeep), i);
+ iv(keep_map) = keep_iv;
+ for j=1:J
+ sum_iv = ind2subv(nodesizes(dsumover), j);
+ iv(sum_map) = sum_iv;
+ k = subv2ind(nodesizes(pot.ddom), iv);
+ q = p1(i,j)/p2(i);
+ compscpot{k} = scgcpot(0, 0, q);
+ end
+ end
+
+ margpot = scgpot(dkeep, [], [], nodesizes, margscpot);
+ comppot = scgpot(pot.ddom, [], [], nodesizes,compscpot);
+ return;
+end
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% head of the potential is not empty %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dsize = pot.dsize;
+compscpot = cell(1, dsize);
+
+fmaskh = find_equiv_posns(margcheaddom, compctaildom);
+fmaskt = find_equiv_posns(margctaildom, compctaildom);
+
+fh = block(fmaskh, compctailsizes);
+ft = block(fmaskt, compctailsizes);
+
+
+if ~isempty(margcheaddom)
+ for i=1:dsize
+ potc = struct(pot.scgpotc{i});
+ q = 1;
+ p = potc.p;
+ [A1, A2, B1, B2, C11, C12, C21, C22] = partition_matrix_vec_3(potc.A, potc.B, potc.C, margcheaddom, compcheaddom, nodesizes);
+
+ if ~isempty(margcheaddom)
+ margscpot{i} = scgcpot(margcheadsize, margctailsize, p, A1, B1, C11);
+ else
+ margscpot{i} = scgcpot(margcheadsize, margctailsize, p);
+ end
+
+ if ~isempty(compcheaddom)
+ if ~isempty(margcheaddom)
+ E = A2 - C21*pinv(C11)*A1;
+ tmp1 = C21*pinv(C11);
+ tmp2 = B2 - C21*pinv(C11)*B1;
+ F = zeros(compcheadsize, compctailsize);
+ F(:, fh) = tmp1;
+ F(:, ft) = tmp2;
+ G = C22 - C21*pinv(C11)*C12;
+ else
+ E = A2;
+ F = B2;
+ G = C22;
+ end
+ compscpot{i} = scgcpot(compcheadsize, compctailsize, q, E, F, G);
+ else
+ compscpot{i} = scgcpot(compcheadsize, 0, q);
+ end
+ if isempty(margcheaddom)
+ margpot = scgpot(margddom, [], [], nodesizes, margscpot);
+ else
+ margpot = scgpot(margddom, margcheaddom, margctaildom, nodesizes, margscpot);
+ end
+ end
+else
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Marginalisation took place over all head variables. %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Calculate the strong marginal %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ margpot = marginalize_pot(pot,keep);
+ mPot = struct(margpot);
+ for i =1:dsize
+ potc = struct(pot.scgpotc{i});
+ % Get the probability of the original potential %
+ q = potc.p;
+
+ % Get the configuration defined by the index i%
+ config = ind2subv(pot.dsizes,i);
+
+ % Calculate the corresponding configuration in the marginal potential
+ if isempty(margpot.dsizes)
+ % keep == []
+ indMargPot = 1;
+ else
+ equivPos = find_equiv_posns(dkeep,pot.ddom);
+ indMargPot = subv2ind(margpot.dsizes,config(equivPos));
+ end
+ % Figure out the corresponding marginal potential
+ mPotC = struct(mPot.scgpotc{indMargPot});
+ p = mPotC.p;
+ if p == 0
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % The following assignment is correct as p is only zero if q is also zero %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ compscpot{i} = scgcpot(compcheadsize,compctailsize,0,potc.A,potc.B,potc.C);
+ else
+ compscpot{i} = scgcpot(compcheadsize,compctailsize,q/p,potc.A,potc.B,potc.C);
+ end
+ end
+end
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Put all components in one potential %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+if isempty(compcheaddom)
+ comppot = scgpot(compddom, [], [], nodesizes,compscpot);
+else
+ comppot = scgpot(compddom, compcheaddom, compctaildom, nodesizes,compscpot);
+end
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/direct_combine_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/direct_combine_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,161 @@
+function pot = direct_combine_pots(pot1, pot2)
+% DIRECT_COMBINE_POTS The combination operation corresponds to ordinary composition of conditional distributions.
+% In some sense is similar to that of forming disjoint union of set.
+% pot = direct_combine_pots(pot1, pot2)
+
+% direct combine can be performed under the condition that the head node set of pot1 is disjoint from the domain of
+% pot2 or vice versa. If the latter condition is satisfied we first exchange pot1 and pot2, then perform the operation.
+% If neither of them is satisfied the direct combine is undefined.
+
+
+if isempty( myintersect(pot1.domain, pot2.cheaddom) )
+ pot1 = pot1;
+ pot2 = pot2;
+elseif isempty( myintersect(pot2.domain, pot1.cheaddom))
+ temppot = pot1;
+ pot1 = pot2;
+ pot2 = temppot;
+else
+ assert(0);
+ return;
+end
+
+domain = myunion(pot1.domain, pot2.domain);
+nodesizes = zeros(1,max(domain));
+nodesizes(pot2.ctaildom) = pot2.ctailsizes;
+nodesizes(pot2.cheaddom) = pot2.cheadsizes;
+nodesizes(pot2.ddom) = pot2.dsizes;
+nodesizes(pot1.ctaildom) = pot1.ctailsizes;
+nodesizes(pot1.cheaddom) = pot1.cheadsizes;
+nodesizes(pot1.ddom) = pot1.dsizes;
+
+dom_u = mysetdiff(pot2.ctaildom, pot1.cheaddom);
+if ~isempty(dom_u) & ~mysubset(dom_u, pot1.ctaildom)
+ pot1 = extension_pot(pot1, [], [], dom_u, nodesizes(dom_u));
+end
+
+dom_u = myunion(pot1.cheaddom, pot1.ctaildom);
+if ~isempty(dom_u) & ~mysubset(dom_u, pot2.ctaildom)
+ pot2 = extension_pot(pot2, [], [], dom_u, nodesizes(dom_u));
+end
+
+
+cheaddom = myunion(pot1.cheaddom, pot2.cheaddom);
+ctaildom = mysetdiff(myunion(pot1.ctaildom, pot2.ctaildom), cheaddom);
+cdom = myunion(cheaddom, ctaildom);
+ddom = mysetdiff(domain, cdom);
+dsizes = nodesizes(ddom);
+dsize = prod(nodesizes(ddom));
+cheadsizes = nodesizes(cheaddom);
+cheadsize = sum(nodesizes(cheaddom));
+ctailsizes = nodesizes(ctaildom);
+ctailsize = sum(nodesizes(ctaildom));
+
+r1 = pot1.cheadsize;
+s1 = pot1.ctailsize;
+scpot = cell(1, dsize);
+mask1 = [];
+mask2 = [];
+if ~isempty(pot1.ddom)
+ mask1 = find_equiv_posns(pot1.ddom, ddom);
+end
+if ~isempty(pot2.ddom)
+ mask2 = find_equiv_posns(pot2.ddom, ddom);
+end
+cmask1 = [];
+cmask2 = [];
+if ~isempty(pot1.cheaddom)
+ cmask1 = find_equiv_posns(pot1.cheaddom, cheaddom);
+end
+if ~isempty(pot2.cheaddom)
+ cmask2 = find_equiv_posns(pot2.cheaddom, cheaddom);
+end
+
+u1 = block(cmask1, cheadsizes);
+u2 = block(cmask2, cheadsizes);
+
+fmaskh = find_equiv_posns(pot1.cheaddom, pot2.ctaildom);
+fmaskt = find_equiv_posns(pot1.ctaildom, pot2.ctaildom);
+
+fh = block(fmaskh, pot2.ctailsizes);
+ft = block(fmaskt, pot2.ctailsizes);
+
+for i=1:dsize
+ sub = ind2subv(dsizes, i);
+ sub1 = sub(mask1);
+ sub2 = sub(mask2);
+ ind1 = subv2ind(pot1.dsizes, sub1);
+ ind2 = subv2ind(pot2.dsizes, sub2);
+
+ if isempty(ind1)
+ ind1 = 1;
+ end
+ if isempty(ind2)
+ ind2 = 1;
+ end
+ potc1 = struct(pot1.scgpotc{ind1});
+ potc2 = struct(pot2.scgpotc{ind2});
+ p = potc1.p;
+ q = potc2.p;
+ ro = p*q;
+
+ A = potc1.A;
+ B = potc1.B;
+ C = potc1.C;
+
+ E = potc2.A;
+ F = potc2.B;
+ G = potc2.C;
+
+ F1 = F(:, fh);
+ F2 = F(:, ft);
+
+ if ~isempty(F1)
+ K1 = F1*A;
+ K2 = F1*B;
+ FCF = F1*C*F1';
+ FC = F1*C;
+ CFT = C*F1';
+ else
+ K1 = zeros(size(E));
+ K2 = zeros(size(F2));
+ FCF = zeros(size(G));
+ FC = zeros(size(C, 1), size(G, 2));
+ CFT = zeros(size(G, 2), size(C, 1));
+ end
+
+
+ U = zeros(cheadsize,1);
+ W = zeros(cheadsize,cheadsize);
+ V = zeros(cheadsize,ctailsize);
+
+ if cheadsize > 0
+ U(u1) = A;
+ U(u2) = E + K1;
+ W(u1, u1) = C;
+ W(u2, u2) = G + FCF;
+ W(u1, u2) = CFT;
+ W(u2, u1) = FC;
+ else
+ U = zeros(cheadsize,1);
+ W = zeros(cheadsize,cheadsize);
+ end
+ if cheadsize > 0 | ctailsize > 0
+ if ~isempty(u1)
+ V(u1, :) = B;
+ else
+ V(u1, :) = zeros(potc1.cheadsize, ctailsize);
+ end
+ if ~isempty(u2)
+ V(u2, :) = F2 + K2;
+ else
+ V(u2, :) = zeros(potc2.cheadsize, ctailsize);
+ end
+ else
+ V = zeros(cheadsize,ctailsize);
+ end
+
+ scpot{i} = scgcpot(cheadsize, ctailsize, ro, U, V, W);
+end
+
+pot = scgpot(ddom, cheaddom, ctaildom, nodesizes, scpot);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/extension_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/extension_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function pot = extension_pot(oldpot, ddom_u, dsizes, ctaildom_u, csizes)
+% EXTENSION_POT Extend a stable CG potential.
+% pot = extension_pot(oldpot, ddom_u, dsizes, ctaildom_u, csizes)
+% ddom_u Added discrete nodes
+% ctaildom_u Added continuous tail nodes
+% csizes is the size of the tail nodes.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% A CG potential can be extended by adding discrete variables to its %
+% domain or continuous variables to its tail                          %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ddom = myunion(oldpot.ddom, ddom_u);
+ctaildom = myunion(oldpot.ctaildom, ctaildom_u);
+cheaddom = oldpot.cheaddom;
+udom = myunion(ddom_u, ctaildom_u);
+domain = myunion(oldpot.domain, udom);
+
+ns = zeros(1,max(domain));
+ns(ddom_u) = dsizes;
+ns(ctaildom_u) = csizes;
+ns(oldpot.ddom) = oldpot.dsizes;
+ns(oldpot.cheaddom) = oldpot.cheadsizes;
+ns(oldpot.ctaildom) = oldpot.ctailsizes;
+
+dsizes = ns(ddom);
+dsize = prod(ns(ddom));
+cheadsizes = ns(cheaddom);
+cheadsize = sum(ns(cheaddom));
+ctailsizes = ns(ctaildom);
+ctailsize = sum(ns(ctaildom));
+
+BZ = zeros(cheadsize, ctailsize);
+potarray = cell(1, dsize);
+mask = find_equiv_posns(oldpot.ddom, ddom);
+
+tmask = find_equiv_posns(oldpot.ctaildom, ctaildom);
+tu = block(tmask, ctailsizes);
+
+for i=1:dsize
+ sub1 = ind2subv(dsizes, i);
+ sub2 = sub1(mask);
+ ind = subv2ind(oldpot.dsizes, sub2);
+ if isempty(ind)
+ ind = 1;
+ end
+ potc = struct(oldpot.scgpotc{ind});
+ p = potc.p;
+ B = BZ;
+ if ~isempty(B)
+ B(:, tu) = potc.B;
+ end
+ potarray{i} = scgcpot(cheadsize, ctailsize, p, potc.A, B, potc.C);
+end
+
+pot = scgpot(ddom, cheaddom, ctaildom, ns,potarray);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,92 @@
+function smallpot = marginalize_pot(bigpot, keep)
+% MARGINALIZE_POT Marginalize a cgpot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, keep)
+
+sumover = mysetdiff(bigpot.domain, keep);
+cdom = myunion(bigpot.cheaddom, bigpot.ctaildom);
+csumover = myintersect(sumover, bigpot.cheaddom);
+dsumover = myintersect(sumover, bigpot.ddom);
+
+dkeep = myintersect(keep, bigpot.ddom);
+ckeep = myintersect(keep, bigpot.cheaddom);
+cheaddom = myintersect(keep, bigpot.cheaddom);
+
+assert(isempty(myintersect(csumover,bigpot.ctaildom)));
+ns = zeros(1, max(bigpot.domain));
+ns(bigpot.ddom) = bigpot.dsizes;
+ns(bigpot.cheaddom) = bigpot.cheadsizes;
+ns(bigpot.ctaildom) = bigpot.ctailsizes;
+
+
+if sum(ns(csumover)) > 0
+ for i=1:bigpot.dsize
+ bigpot.scgpotc{i} = marginalize_pot(bigpot.scgpotc{i}, ckeep, csumover, ns);
+ end
+end
+
+if (isequal(csumover, cheaddom))
+ bigpot.ctaildom = [];
+end
+% If we are not marginalizing over any discrete nodes, we are done.
+if prod(ns(dsumover))==1
+ smallpot = scgpot(dkeep, cheaddom, bigpot.ctaildom, ns, bigpot.scgpotc);
+ return;
+end
+
+if (~isempty(bigpot.ctaildom))
+ assert(0);
+ return;
+end
+
+I = prod(ns(dkeep));
+J = prod(ns(dsumover));
+C = sum(ns(ckeep));
+sum_map = find_equiv_posns(dsumover, bigpot.ddom);
+keep_map = find_equiv_posns(dkeep, bigpot.ddom);
+iv = zeros(1, length(bigpot.ddom)); % index vector
+
+p1 = zeros(I,J);
+A1 = zeros(C,J,I);
+C1 = zeros(C,C,J,I);
+for i=1:I
+ keep_iv = ind2subv(ns(dkeep), i);
+ iv(keep_map) = keep_iv;
+ for j=1:J
+ sum_iv = ind2subv(ns(dsumover), j);
+ iv(sum_map) = sum_iv;
+ k = subv2ind(ns(bigpot.ddom), iv);
+ pot = struct(bigpot.scgpotc{k}); % violate object privacy
+ p1(i,j) = pot.p;
+ if C > 0 % so mu1 and Sigma1 are non-empty
+ A1(:,j,i) = pot.A;
+ C1(:,:,j,i) = pot.C;
+ end
+ end
+end
+
+% Collapse the mixture of Gaussians
+coef = mk_stochastic(p1); % coef must be convex combination
+%keyboard
+p2 = sum(p1,2);
+if (all(p2 == 0))
+ p2 = p2 + (p2==0)*eps;
+end
+A = [];
+S = [];
+
+pot = cell(1,I);
+ctailsize = sum(ns(bigpot.ctaildom));
+tB = zeros(C, ctailsize);
+for i=1:I
+ if C > 0
+ [A, S] = collapse_mog(A1(:,:,i), C1(:,:,:,i), coef(i,:));
+ end
+ p = p2(i);
+ pot{i} = scgcpot(C, ctailsize, p, A, tB, S);
+end
+
+smallpot = scgpot(dkeep, ckeep, bigpot.ctaildom, ns, pot);
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the SCG potential Pr(X,E) into Pr(X|E) and return log Pr(E).
+% [pot, loglik] = normalize_pot(pot)
+
+% Marginalize down to [], so that the normalizing constant becomes Pr(E)
+temp = marginalize_pot(pot, []);
+[temp2, loglik] = normalize_pot(temp.scgpotc{1});
+
+% Adjust scale factor to reflect the fact that the pot now represents Pr(X | E) instead of Pr(X,E).
+
+scale = -loglik;
+if 1
+ for i=1:pot.dsize
+ pot.scgpotc{i} = rescale_pot( pot.scgpotc{i}, scale);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/pot_to_marginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/pot_to_marginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function m = pot_to_marginal(pot)
+% POT_TO_MARGINAL Convert a scgpot to a marginal structure.
+% m = pot_to_marginal(pot)
+
+assert(isempty(pot.ctaildom))
+m.domain = pot.domain;
+n = pot.cheadsize;
+d = pot.dsize;
+
+if n==0
+ m.mu = [];
+ m.Sigma = [];
+else
+ m.mu = zeros(n, d);
+ m.Sigma = zeros(n, n, d);
+end
+%m.T = 0*myones(pot.dsizes);
+m.T = 0*myones(pot.dsize);
+for i=1:pot.dsize
+ potc = struct(pot.scgpotc{i}); % violate privacy of object
+ if n > 0
+ m.mu(:,i) = potc.A;
+ m.Sigma(:,:,i) = potc.C;
+ end
+ m.T(i) = potc.p;
+end
+if isvectorBNT(m.T)
+ m.T = m.T(:)';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/recursive_combine_pots.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/recursive_combine_pots.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function pot = recursive_combine_pots(pot1, pot2)
+% RECURSIVE_COMBINE_POTS recursive combine two potentials
+% pot = recursive_combine_pots(pot1, pot2)
+
+pot1 = reduce_pot(pot1);
+pot2 = reduce_pot(pot2);
+% Recursion is stopped, if recursive combination is defined by direct combination,
+% i.e. if the domain of one potential is disjoint from the head of the other.
+if (isempty(myintersect(pot1.domain,pot2.cheaddom))|...
+ isempty(myintersect(pot1.cheaddom,pot2.domain)))
+ pot = direct_combine_pots(pot1,pot2);
+else
+    % Test whether one of the set-differences is not empty
+ % as defined in Lauritzen99 "Stable Local Computation with Conditional Gaussian Distributions"
+ % on page 9
+ D12 = mysetdiff(pot1.cheaddom, pot2.domain);
+ D21 = mysetdiff(pot2.cheaddom, pot1.domain);
+ if (isempty(D12) & isempty(D21))
+ assert(0,'Recursive combination is not defined');
+ end
+
+ if ~isempty(D12)
+ % Calculate the complementary potential for the set
+ % D1\D12 as defined in Lauritzen 99, page 9
+ keep = mysetdiff(pot1.domain,D12);
+ [margpot, comppot] = complement_pot(pot1,keep);
+ margpot = reduce_pot(margpot);
+ comppot = reduce_pot(comppot);
+ pot = direct_combine_pots( recursive_combine_pots(margpot, pot2), comppot);
+ elseif ~isempty(D21)
+ keep = mysetdiff(pot2.domain,D21);
+ [margpot, comppot] = complement_pot(pot2,D21);
+ margpot = reduce_pot(margpot);
+ comppot = reduce_pot(comppot);
+ pot = direct_combine_pots( recursive_combine_pots(pot1, margpot), comppot);
+ end
+end
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/reduce_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/reduce_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function [reduced_pot,successful] = reduce_pot(pot,tailnodes)
+% Executes the reduce operation defined in
+% Stable Local Computation with Conditional Gaussian Distributions
+% Steffen L. Lauritzen
+% Frank Jensen
+% September 1999
+% The potential pot is reduced if B contains any zero columns
+% The tests are restricted to the positions in tailnodes.
+% Any columns successfully deleted are entered in the array successful
+if nargin < 2
+ tailnodes = pot.ctaildom;
+end
+
+successful = [];
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Keep track of remaining tailnodes %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+rem_tailnodes = pot.ctaildom;
+for i = tailnodes
+ pos = find(i==rem_tailnodes);
+ successful_red = [pos];
+ red_scgcpot = cell(1,pot.dsize);
+ j = 1;
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Test whether all components of pot.scgpotc can be reduced %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ while ((j <= pot.dsize) & ~isempty(successful_red))
+ [cpot,successful_red] = reduce_pot(pot.scgpotc{j},pos);
+ red_scgcpot{j} = cpot;
+ j = j + 1;
+ end
+
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % If i is a reducible tailnode, then reduce the potential %
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ if ~isempty(successful_red)
+ successful = [successful i];
+ pot.scgpotc = red_scgcpot;
+ rem_tailnodes = mysetdiff(rem_tailnodes,i);
+ end;
+end
+
+pot.ctaildom = rem_tailnodes;
+positions = find_equiv_posns(rem_tailnodes,pot.ctaildom);
+pot.ctailsizes = pot.ctailsizes(positions);
+pot.ctailsize = sum(pot.ctailsizes);
+pot.domain = mysetdiff(pot.domain,successful);
+reduced_pot = pot;
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/scgpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@scgpot/scgpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function pot = scgpot(ddom, cheaddom, ctaildom, node_sizes, scgpotc)
+% SCGPOT Make a stable CG potential.
+% pot = scgpot(ddom, cheaddom, ctaildom, node_sizes, scgpotc)
+%
+% ddom is the discrete nodes contained in the potential
+% cheaddom is the head nodes contained in the potential
+% ctaildom is the tail nodes contained in the potential
+% node_sizes(i) is the size of the i'th node.
+% scgpotc is list of scgcpot objects.
+
+pot.ddom = ddom;
+pot.cheaddom = cheaddom;
+pot.ctaildom = ctaildom;
+pot.domain = myunion(ddom, myunion(cheaddom, ctaildom));
+pot.dsizes = node_sizes(pot.ddom);
+pot.dsize = prod(node_sizes(pot.ddom));
+pot.cheadsizes = node_sizes(pot.cheaddom);
+pot.cheadsize = sum(node_sizes(pot.cheaddom));
+pot.ctailsizes = node_sizes(pot.ctaildom);
+pot.ctailsize = sum(node_sizes(pot.ctaildom));
+
+if nargin < 5
+ scgpotc = cell(1, pot.dsize);
+ for i=1:pot.dsize
+ scgpotc{i} = scgcpot(pot.cheadsize, pot.ctailsize);
+ end
+end
+pot.scgpotc = scgpotc;
+
+pot = class(pot, 'scgpot');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+/approxeq_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/display.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/divide_by_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/marginalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/multiply_by_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/normalize_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/pot_to_marginal.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/upot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/upot_to_opt_policy.m/1.1.1.1/Wed May 29 15:59:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/@upot
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/approxeq_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/approxeq_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,5 @@
+function p = approxeq_pot(A, B, tol)
+
+if nargin < 3, tol = 1e-3; end
+
+p = approxeq(A.p, B.p, tol) & approxeq(A.u, B.u, tol);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/display.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/display.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function display(pot)
+
+disp('utility potential object');
+disp(struct(pot));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/divide_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/divide_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function Tbig = divide_by_pot(Tbig, Tsmall)
+% DIVIDE_BY_POT Tbig /= Tsmall
+% Tbig = divide_by_pot(Tbig, Tsmall)
+%
+% Tsmall's domain must be a subset of Tbig's domain.
+
+smallp = extend_domain_table(Tsmall.p, Tsmall.domain, Tsmall.sizes, Tbig.domain, Tbig.sizes);
+smallp = smallp + (smallp==0);
+Tbig.p = Tbig.p ./ smallp;
+
+smallu = extend_domain_table(Tsmall.u, Tsmall.domain, Tsmall.sizes, Tbig.domain, Tbig.sizes);
+Tbig.u = Tbig.u - smallu;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/marginalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/marginalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+function smallpot = marginalize_pot(bigpot, onto, maximize)
+% MARGINALIZE_POT Marginalize a upot onto a smaller domain.
+% smallpot = marginalize_pot(bigpot, onto, maximize)
+%
+% The maximize argument is ignored
+
+numer = marg_table(bigpot.p .* bigpot.u, bigpot.domain, bigpot.sizes, onto);
+denom = marg_table(bigpot.p, bigpot.domain, bigpot.sizes, onto);
+
+p = denom;
+% replace 0s by 1s before dividing. This is valid since denom(i) = 0 => numer(i) = 0
+denom = denom + (denom == 0);
+u = numer ./ denom;
+
+ns = zeros(1, max(bigpot.domain));
+ns(bigpot.domain) = bigpot.sizes;
+
+smallpot = upot(onto, ns(onto), p, u);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/multiply_by_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/multiply_by_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12 @@
+function Tbig = multiply_by_pot(Tbig, Tsmall)
+% MULTIPLY_BY_POT Tbig *= Tsmall
+% Tbig = multiply_by_pot(Tbig, Tsmall)
+%
+% Tsmall's domain must be a subset of Tbig's domain.
+
+smallp = extend_domain_table(Tsmall.p, Tsmall.domain, Tsmall.sizes, Tbig.domain, Tbig.sizes);
+Tbig.p = Tbig.p .* smallp;
+
+smallu = extend_domain_table(Tsmall.u, Tsmall.domain, Tsmall.sizes, Tbig.domain, Tbig.sizes);
+Tbig.u = Tbig.u + smallu;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/normalize_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/normalize_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+function [pot, loglik] = normalize_pot(pot)
+% NORMALIZE_POT Convert the probability part of a utility potential
+% [pot, loglik] = normalize_pot(pot)
+
+[pot.p, lik] = normalise(pot.p);
+%pot.u = pot.u - sum(pot.u(:));
+%pot.u = pot.u ./ sum(pot.u(:)); % same as normalise(pot.u)
+%pot.u = normalise(pot.u);
+%pot.u = pot.u / 726.8121;
+pot.u = pot.u / 10;
+loglik = log(lik + (lik==0)*eps);
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/pot_to_marginal.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/pot_to_marginal.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+function m = pot_to_marginal(pot)
+% POT_TO_MARGINAL Convert a upot to a structure.
+% m = pot_to_marginal(pot)
+
+m.domain = pot.domain;
+m.T = pot.p;
+m.U = pot.u;
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/upot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/upot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+function pot = upot(domain, sizes, p, u)
+% UPOT Make a discrete utility potential.
+% pot = upot(domain, sizes, p, u)
+%
+% sizes(i) is the size of the i'th domain element.
+% p defaults to all 1s, u defaults to all 0s.
+
+if nargin < 3, p = myones(sizes); end
+if nargin < 4, u = 0*myones(sizes); end
+
+pot.domain = domain;
+pot.p = myreshape(p, sizes);
+pot.u = myreshape(u, sizes);
+pot.sizes = sizes(:)';
+pot = class(pot, 'upot');
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/upot_to_opt_policy.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/@upot/upot_to_opt_policy.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function [policy, EU] = upot_to_opt_policy(pot)
+% UPOT_TO_OPT_POLICY Compute an optimal deterministic policy given a utility potential
+% [policy, EU] = upot_to_opt_policy(pot)
+%
+% policy(a,b, ..., z) = P(do z | a, b, ..), which will be a delta function
+% EU is the contraction of this potential, i.e., P .* U
+
+sz = pot.sizes; % mysize(pot.p);
+if isempty(sz)
+ EU = pot.u;
+ policy = [];
+ return;
+end
+
+parent_size = prod(sz(1:end-1));
+self_size = sz(end);
+C = pot.p .* pot.u; % contraction
+C = reshape(C, parent_size, self_size);
+policy = zeros(parent_size, self_size);
+for i=1:parent_size
+ act = argmax(C(i,:));
+ policy(i, act) = 1;
+end
+policy = myreshape(policy, sz);
+EU = sum(C(:));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/CPD_to_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/CPD_to_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function pot = CPD_to_pot(pot_type, CPD, domain, ns, cnodes, evidence)
+% CPD_TO_POT Convert a CPD to a potential of the specified form, incorporating any evidence
+% pot = CPD_to_pot(pot_type, CPD, domain, node_sizes, cnodes, evidence)
+%
+% pot_type is one of 'd', 'g', or 'cg'.
+% domain is the domain of CPD.
+% node_sizes(i) is the size of node i.
+% cnodes = the cts nodes
+% evidence{i} is the evidence on the i'th node.
+
+switch pot_type
+ case 'd',
+ pot = CPD_to_dpot(CPD, domain, ns, cnodes, evidence);
+ case 'g',
+ pot = CPD_to_cpot(CPD, domain, ns, cnodes, evidence);
+ case 'cg',
+ pot = CPD_to_cgpot(CPD, domain, ns, cnodes, evidence);
+ otherwise,
+ error(['can''t handle pot_type ' pot_type]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,7 @@
+/CPD_to_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/README/1.1.1.1/Wed May 29 15:59:58 2002//
+/check_for_cd_arcs.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/determine_pot_type.m/1.1.1.1/Wed May 29 15:59:58 2002//
+/genops.c/1.1.1.1/Sat Jul 28 15:43:40 2001//
+/mk_initial_pot.m/1.1.1.1/Wed May 29 15:59:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Entries.Log
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Entries.Log Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9 @@
+A D/@cgpot////
+A D/@cpot////
+A D/@dpot////
+A D/@mpot////
+A D/@scgcpot////
+A D/@scgpot////
+A D/@upot////
+A D/Old////
+A D/Tables////
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Old/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Old/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,2 @@
+/comp_eff_node_sizes.m/1.1.1.1/Wed May 29 15:59:58 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Old/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Old/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/Old
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Old/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Old/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Old/comp_eff_node_sizes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Old/comp_eff_node_sizes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,11 @@
+function ens = comp_eff_node_sizes(ns, cnodes, ev, domain)
+
+dnodes = mysetdiff(1:length(ns), cnodes);
+odom = domain(~isemptycell(evidence(domain)));
+cdom = myintersect(cnodes, domain);
+ddom = myintersect(dnodes, domain);
+cobs = myintersect(cdom, odom);
+dobs = myintersect(ddom, odom);
+ens = ns;
+ens(cobs) = 0;
+ens(dobs) = 1;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/README Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,100 @@
+The following kinds of potentials are supported
+- dpot: discrete
+- upot: utility
+- mpot: Gaussian in moment form
+- cpot: Gaussian in canonical form
+- cgpot: conditional (mixture) Gaussian, a list of mpots/cpot
+- scgpot: stable conditional Gaussian, a list of scgcpots
+- scgcpot: just used by scgpot
+
+Many of these are described in the following book
+
+@book{Cowell99,
+ author = "R. G. Cowell and A. P. Dawid and S. L. Lauritzen and D. J. Spiegelhalter",
+ title = "Probabilistic Networks and Expert Systems",
+ year = 1999,
+ publisher = "Springer"
+}
+
+CPD_to_pot converts P(Z|A,B,...) to phi(A,B,...,Z).
+
+A table is like a dpot, except it is a structure, not an object.
+Code that uses tables is faster but less flexible.
+
+ -----------
+
+A potential is a joint probability distribution on a set of nodes,
+which we call the potential's domain (which is always sorted).
+A potential supports the operations of multiplication and
+marginalization.
+
+If the nodes are discrete, the potential can be represented as a table
+(multi-dimensional array). If the nodes are Gaussian, the potential
+can be represented as a quadratic form. If there are both discrete and
+Gaussian nodes, we use a table of quadratic forms. For details on the
+Gaussian case, see below.
+
+For discrete potentials, the 'sizes' field specifies the number of
+values each node in the domain can take on. For continuous potentials,
+the 'sizes' field specifies the block-size of each node.
+
+If some of the nodes are observed, extra complications arise. We
+handle the discrete and continuous cases differently. Suppose the
+domain is [X Y], with sizes [6 2], where X is observed to have value x.
+In the discrete case, the potential will have many zeros in it
+(T(X,:) will be 0 for all X ~= x), which can be inefficient. Instead,
+we set sizes to [1 2], to indicate that X has only one possible value
+(namely x). For continuous nodes, we set sizes = [0 2], to indicate that X no
+longer appears in the mean vector or covariance matrix (we must avoid
+0s in Sigma, lest it be uninvertible). When a potential is created, we
+assume the sizes of the nodes have been adjusted to include the
+evidence. This is so that the evidence can be incorporated at the
+outset, and thereafter the inference algorithms can ignore it.
+
+ ------------
+
+A Gaussian potential can be represented in terms of its
+moment characteristics (mu, Sigma, logp), or in terms of its canonical
+characteristics (g, h, K). Although the moment characteristics are
+more familiar, it turns out that canonical characteristics are
+more convenient for the junction tree algorithm, for the same kinds of
+reasons why backwards inference in an LDS uses the information form of
+the Kalman filter (see Murphy (1998a) for a discussion).
+
+When working with *conditional* Gaussian potentials, the method proposed
+by Lauritzen (1992), and implemented here, requires converting from
+canonical to moment form before marginalizing the discrete variables,
+and converting back from moment to canonical form before
+multiplying/dividing. A new algorithm, due to Lauritzen and Jensen
+(1999), works exclusively in moment form, and
+hence is more numerically stable. It can also handle 0s in the
+covariance matrix, i.e., deterministic relationships between cts
+variables. However, it has not yet been implemented,
+since it requires major changes to the jtree algorithm.
+
+In Murphy (1998b) we extend Lauritzen (1992) to handle
+vector-valued nodes. This means the vectors and matrices become block
+vectors and matrices. This manifests itself in the code as in the
+following example.
+Suppose we have a potential on nodes dom=[3,4,7] with block sizes=[2,1,3].
+Then nodes 3 and 7 correspond to blocks 1,3 which correspond to indices 1,2,4,5,6.
+>> find_equiv_posns([3 7], dom)=[1,3]
+>> block([1,3],blocks)=[1,2,4,5,6].
+
+For more details, see
+
+- "Filtering and Smoothing in Linear Dynamical Systems using the Junction Tree Algorithm",
+ K. Murphy, 1998a. UCB Tech Report.
+
+- "Inference and learning in hybrid Bayesian networks",
+ K. Murphy. UCB Technical Report CSD-98-990, 1998b.
+
+- "Propagation of probabilities, means and variances in mixed
+ graphical association models", S. L. Lauritzen, 1992, JASA 87(420):1098--1108.
+
+- "Causal probabilistic networks with both discrete and continuous variables",
+ K. G. Olesen, 1993. PAMI 3(15). This discusses implementation details.
+
+- "Stable local computation with Conditional Gaussian distributions",
+ S. Lauritzen and F. Jensen, 1999. Univ. Aalborg Tech Report R-99-2014.
+ www.math.auc.dk/research/Reports.html.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+/divide_by_sparse_table.c/1.1.1.1/Wed May 29 15:59:58 2002//
+/divide_by_table.c/1.1.1.1/Wed May 29 15:59:58 2002//
+/divide_by_table.m/1.1.1.1/Thu Aug 5 15:25:54 2004//
+/extend_domain_table.m/1.1.1.1/Wed Aug 4 15:53:26 2004//
+/marg_sparse_table.c/1.1.1.1/Wed May 29 15:59:58 2002//
+/marg_table.c/1.1.1.1/Wed May 29 15:59:58 2002//
+/marg_table.m/1.1.1.1/Wed Aug 4 15:51:48 2004//
+/marg_tableC.c/1.1.1.1/Wed Oct 2 15:39:02 2002//
+/marg_tableM.m/1.1.1.1/Tue Oct 1 17:39:08 2002//
+/mult_by_sparse_table.c/1.1.1.1/Wed May 29 15:59:58 2002//
+/mult_by_table.c/1.1.1.1/Tue Oct 1 21:23:22 2002//
+/mult_by_table.m/1.1.1.1/Wed Aug 4 15:52:58 2004//
+/mult_by_table2.m/1.1.1.1/Wed Oct 2 15:30:32 2002//
+/mult_by_tableC.c/1.1.1.1/Tue Oct 1 21:33:50 2002//
+/mult_by_tableM.m/1.1.1.1/Wed Oct 2 15:28:48 2002//
+/mult_by_table_global.m/1.1.1.1/Sun Sep 29 10:21:30 2002//
+/rep_mult.c/1.1.1.1/Wed May 29 15:59:58 2002//
+/repmat_and_mult.c/1.1.1.1/Tue Oct 1 21:20:00 2002//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/BNT/potentials/Tables
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/divide_by_sparse_table.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/divide_by_sparse_table.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,126 @@
+/* divide_by_sparse_table.c ../potential/tables*/
+
+/******************************************/
+/* 6 input & 1 output */
+/* Big table [0] */
+/* Big domain [1] */
+/* big sizes [2] */
+/* Small table [3] */
+/* small domain [4] */
+/* small sizes [5] */
+/* */
+/* New big table[0] */
+/******************************************/
+
+#include
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i
+#include
+#include "mex.h"
+
+int compare(const void* src1, const void* src2){
+ int i1 = *(int*)src1 ;
+ int i2 = *(int*)src2 ;
+ return i1-i2 ;
+}
+
+void ind_subv(int index, const int *cumprod, int n, int *bsubv){
+ int i;
+
+ for (i = n-1; i >= 0; i--) {
+ bsubv[i] = ((int)floor(index / cumprod[i]));
+ index = index % cumprod[i];
+ }
+}
+
+int subv_ind(const int n, const int *cumprod, const int *subv){
+ int i, index=0;
+
+ for(i=0; i1
+ sz = bigsz;
+ sz(map) = 1; % don't replicate along small domain, which is shared
+ % we can use repmat instead of myrepmat, because we know length(sz)>1
+ smallT = repmat(smallT, sz(:)');
+ bigT(:) = bigT(:) .* smallT(:);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/mult_by_tableC.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/Tables/mult_by_tableC.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,114 @@
+/* mult_by_table.c ../potential/tables */
+
+
+/******************************************/
+/* 6 input & 1 output */
+/* Big table [0] */
+/* Big domain [1] */
+/* big sizes [2] */
+/* Small table [3] */
+/* small domain [4] */
+/* small sizes [5] */
+/* */
+/* New big table[0] */
+/******************************************/
+
+#include "mex.h"
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
+ int i, j, count, NB, NS, siz_b, siz_s, ndim, temp;
+ int *mask, *sx, *sy, *cpsy, *subs, *s, *cpsy2;
+ double *pbDomain, *psDomain, *sp, *zp, *bs;
+
+ plhs[0] = mxDuplicateArray(prhs[0]);
+ zp = mxGetPr(plhs[0]);
+
+ siz_b = mxGetNumberOfElements(prhs[1]);
+ siz_s = mxGetNumberOfElements(prhs[4]);
+ pbDomain = mxGetPr(prhs[1]);
+ psDomain = mxGetPr(prhs[4]);
+
+ NB = mxGetNumberOfElements(prhs[0]);
+ NS = mxGetNumberOfElements(prhs[3]);
+ sp = mxGetPr(prhs[3]);
+
+ bs = mxGetPr(prhs[2]);
+
+ if(NS == 1){
+ for(i=0; i 1)
+ mexErrMsgTxt("Too many output arguments.");
+
+ xnd = mxGetNumberOfDimensions(prhs[0]);
+ ynd = mxGetNumberOfDimensions(prhs[1]);
+ xdim = mxGetDimensions(prhs[0]);
+ ydim = mxGetDimensions(prhs[1]);
+ ndim = mxGetNumberOfElements(prhs[2]);
+
+ pSizes = mxGetPr(prhs[2]);
+
+ sx = (int *)malloc(sizeof(int)*ndim);
+ sy = (int *)malloc(sizeof(int)*ndim);
+ s = (int *)malloc(sizeof(int)*ndim);
+ s1 = (int *)malloc(sizeof(int)*ndim);
+ *(cpsx = (int *)malloc(sizeof(int)*ndim)) = 1;
+ *(cpsy = (int *)malloc(sizeof(int)*ndim)) = 1;
+ subs = (int *)malloc(sizeof(int)*ndim);
+ cpsx2 = (int *)malloc(sizeof(int)*ndim);
+ cpsy2 = (int *)malloc(sizeof(int)*ndim);
+ for(i=0; i 1)
+ mexErrMsgTxt("Too many output arguments.");
+
+ plhs[0] = mxDuplicateArray(prhs[0]);
+ zp = mxGetPr(plhs[0]);
+ sp = mxGetPr(prhs[1]);
+
+ xnd = mxGetNumberOfDimensions(prhs[0]);
+ ynd = mxGetNumberOfDimensions(prhs[1]);
+ xdim = mxGetDimensions(prhs[0]);
+ ydim = mxGetDimensions(prhs[1]);
+ ndim = xnd;
+
+ NB = mxGetNumberOfElements(prhs[0]);
+ NS = mxGetNumberOfElements(prhs[1]);
+
+ if(NS == 1){
+ for(i=0; iD links where the C node is hidden.
+% check_for_cd_arcs(onodes, cnodes, dag)
+%
+% We cannot convert the logistic/softmax function (C->D CPD) to a Gaussian potential
+% unless we use the variational approximation discussed in
+% "A variational approximation for Bayesian networks with discrete and continuous latent
+% variables", K. Murphy, UAI 1999.
+
+n = length(dag);
+hnodes = mysetdiff(1:n, onodes);
+chid = myintersect(cnodes, hnodes);
+dnodes = mysetdiff(1:n, cnodes);
+for i=chid(:)'
+ dcs = myintersect(children(dag, i), dnodes);
+ if ~isempty(dcs)
+ error(['hidden cts node ' num2str(i) ' has a discrete child']);
+ end
+end
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/determine_pot_type.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/determine_pot_type.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function pot_type = determine_pot_type(model, onodes, nodes)
+% DETERMINE_POT_TYPE Determine the type of potential based on the evidence pattern.
+% pot_type = determine_pot_type(model, onodes, nodes)
+%
+% If there are any utility nodes, pot_type = 'u'
+% else
+% If all hidden nodes are discrete, pot_type = 'd'.
+% If all hidden nodes are continuous, pot_type = 'g' (Gaussian).
+% If some hidden nodes are discrete, and some cts, pot_type = 'cg' (conditional Gaussian).
+%
+% nodes defaults to all nodes in graph
+
+nnodes = length(model.node_sizes);
+if nargin < 3, nodes = 1:nnodes; end
+
+hnodes = mysetdiff(nodes, onodes);
+if isfield(model, 'limid') %~isempty(model.utility_nodes)
+ pot_type = 'u';
+elseif isempty(myintersect(model.cnodes, hnodes))
+ pot_type = 'd';
+elseif mysubset(hnodes, model.cnodes)
+ pot_type = 'g';
+else
+ pot_type = 'cg';
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/genops.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/genops.c Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,707 @@
+/*
+
+ GENOPS.C
+ Generalized arithmetic operators overloading built-in functions.
+
+ written by Douglas M. Schwarz
+ schwarz@servtech.com
+ 26 December 1998
+ Last modified: 2 April 1999
+
+ Copyright 1998-1999 by Douglas M. Schwarz. All rights reserved.
+
+*/
+
+
+/*
+
+Build MEX file by entering the appropriate command at the MATLAB prompt
+(-D option is equivalent to #define in source file):
+
+mex genops.c -DPLUS_MEX -output plus
+mex genops.c -DMINUS_MEX -output minus
+mex genops.c -DTIMES_MEX -output times
+mex genops.c -DRDIVIDE_MEX -output rdivide
+mex genops.c -DLDIVIDE_MEX -output ldivide
+mex genops.c -DPOWER_MEX -output power
+mex genops.c -DEQ_MEX -output eq
+mex genops.c -DNE_MEX -output ne
+mex genops.c -DLT_MEX -output lt
+mex genops.c -DGT_MEX -output gt
+mex genops.c -DLE_MEX -output le
+mex genops.c -DGE_MEX -output ge
+
+*/
+
+/* This file has been formatted for a tab equal to 4 spaces. */
+
+#if defined(EQ_MEX) || defined(NE_MEX) || defined(LT_MEX) || defined(GT_MEX) \
+ || defined(LE_MEX) || defined(GE_MEX)
+#define RELOP_MEX
+#endif
+
+#include "mex.h"
+#include "matrix.h"
+#ifdef POWER_MEX
+#include
+#define PI 3.141592653589793
+#endif
+
+bool allequal(int, const int *, const int *);
+void removeZeroImag(double *, double *, int, const int *, int, mxArray **);
+
+#define xMat prhs[0]
+#define yMat prhs[1]
+#define zMat plhs[0]
+
+#define min(A,B) ((A) < (B) ? (A) : (B))
+#define max(A,B) ((A) > (B) ? (A) : (B))
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+ double *xrp, *xip, *yrp, *yip;
+#ifndef RELOP_MEX
+ double *zr, *zi, *zip;
+#endif
+ double *zrp, *zrend;
+ int xnd, ynd, numElements = 1;
+ const int *xdim, *ydim;
+ bool xcmplx, ycmplx;
+ mxClassID yclass;
+ int *s, ndim, *sx, *sy, i, *cpsx, *cpsy;
+ int *subs, *s1, *cpsx2, *cpsy2;
+ int ix = 0, iy = 0;
+ mxArray *args[3], *result[1];
+#if defined(RDIVIDE_MEX) || defined(LDIVIDE_MEX)
+ double denom;
+#endif
+#ifdef POWER_MEX
+ double mag, theta, phi, magx;
+ int flops = 0;
+#endif
+
+
+ if (nrhs != 2)
+ mexErrMsgTxt("Incorrect number of inputs.");
+
+ if (nlhs > 1)
+ mexErrMsgTxt("Too many output arguments.");
+
+ xnd = mxGetNumberOfDimensions(xMat);
+ ynd = mxGetNumberOfDimensions(yMat);
+ xdim = mxGetDimensions(xMat);
+ ydim = mxGetDimensions(yMat);
+
+ yclass = mxGetClassID(yMat);
+
+/* If the built-in function in MATLAB can handle the arguments
+ then use that. */
+ if (yclass != mxDOUBLE_CLASS ||
+ (xnd == 2 && xdim[0] == 1 && xdim[1] == 1) ||
+ (ynd == 2 && ydim[0] == 1 && ydim[1] == 1) ||
+ (xnd == ynd && allequal(xnd,xdim,ydim)))
+ {
+#ifdef PLUS_MEX
+ args[0] = mxCreateString("plus");
+#elif defined(MINUS_MEX)
+ args[0] = mxCreateString("minus");
+#elif defined(TIMES_MEX)
+ args[0] = mxCreateString("times");
+#elif defined(RDIVIDE_MEX)
+ args[0] = mxCreateString("rdivide");
+#elif defined(LDIVIDE_MEX)
+ args[0] = mxCreateString("ldivide");
+#elif defined(POWER_MEX)
+ args[0] = mxCreateString("power");
+#elif defined(EQ_MEX)
+ args[0] = mxCreateString("eq");
+#elif defined(NE_MEX)
+ args[0] = mxCreateString("ne");
+#elif defined(LT_MEX)
+ args[0] = mxCreateString("lt");
+#elif defined(GT_MEX)
+ args[0] = mxCreateString("gt");
+#elif defined(LE_MEX)
+ args[0] = mxCreateString("le");
+#elif defined(GE_MEX)
+ args[0] = mxCreateString("ge");
+#endif
+ args[1] = (mxArray *)xMat;
+ args[2] = (mxArray *)yMat;
+ mexCallMATLAB(1, result, 3, args, "builtin");
+ mxDestroyArray(args[0]);
+ zMat = result[0];
+ }
+ else /* X and Y are both N-D and different dimensionality. */
+ {
+ ndim = max(xnd,ynd);
+ sx = (int *)mxMalloc(sizeof(int)*ndim);
+ sy = (int *)mxMalloc(sizeof(int)*ndim);
+ s = (int *)mxMalloc(sizeof(int)*ndim);
+ s1 = (int *)mxMalloc(sizeof(int)*ndim);
+ *(cpsx = (int *)mxMalloc(sizeof(int)*ndim)) = 1;
+ *(cpsy = (int *)mxMalloc(sizeof(int)*ndim)) = 1;
+ subs = (int *)mxMalloc(sizeof(int)*ndim);
+ cpsx2 = (int *)mxMalloc(sizeof(int)*ndim);
+ cpsy2 = (int *)mxMalloc(sizeof(int)*ndim);
+ for (i = 0; i < ndim; i++)
+ {
+ subs[i] = 0;
+ sx[i] = (i < xnd) ? xdim[i] : 1;
+ sy[i] = (i < ynd) ? ydim[i] : 1;
+ if (sx[i] == sy[i])
+ s[i] = sx[i];
+ else if (sx[i] == 1)
+ s[i] = sy[i];
+ else if (sy[i] == 1)
+ s[i] = sx[i];
+ else
+ {
+ mxFree(sx);
+ mxFree(sy);
+ mxFree(s);
+ mxFree(s1);
+ mxFree(cpsx);
+ mxFree(cpsy);
+ mxFree(subs);
+ mxFree(cpsx2);
+ mxFree(cpsy2);
+ mexErrMsgTxt("Array dimensions are not appropriate.");
+ }
+ s1[i] = s[i] - 1;
+ numElements *= s[i];
+ }
+
+ for (i = 0; i < ndim-1; i++)
+ {
+ cpsx[i+1] = cpsx[i]*sx[i]--;
+ cpsy[i+1] = cpsy[i]*sy[i]--;
+ cpsx2[i] = cpsx[i]*sx[i];
+ cpsy2[i] = cpsy[i]*sy[i];
+ }
+ cpsx2[ndim-1] = cpsx[ndim-1]*(--sx[ndim-1]);
+ cpsy2[ndim-1] = cpsy[ndim-1]*(--sy[ndim-1]);
+
+ xcmplx = mxIsComplex(xMat);
+ ycmplx = mxIsComplex(yMat);
+
+ if (!xcmplx && !ycmplx) /* X and Y both N-D, both real. */
+ {
+#ifdef POWER_MEX
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxCOMPLEX);
+ zrp = zr = mxGetPr(zMat);
+ zip = zi = mxGetPi(zMat);
+#elif defined(RELOP_MEX)
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxREAL);
+ mxSetLogical(zMat);
+ zrp = mxGetPr(zMat);
+#else
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxREAL);
+ zrp = mxGetPr(zMat);
+#endif
+ xrp = mxGetPr(xMat);
+ yrp = mxGetPr(yMat);
+ zrend = zrp + numElements;
+ while (zrp < zrend)
+ {
+#ifdef PLUS_MEX
+ *zrp++ = *xrp + *yrp;
+#elif defined(MINUS_MEX)
+ *zrp++ = *xrp - *yrp;
+#elif defined(TIMES_MEX)
+ *zrp++ = *xrp * *yrp;
+#elif defined(RDIVIDE_MEX)
+ *zrp++ = *xrp / *yrp;
+#elif defined(LDIVIDE_MEX)
+ *zrp++ = *yrp / *xrp;
+#elif defined(POWER_MEX)
+ if (*xrp < 0.0 && *yrp != floor(*yrp))
+ {
+ mag = pow(-*xrp,*yrp);
+ theta = PI * *yrp;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ *zrp++ = pow(*xrp,*yrp);
+ *zip++ = 0.0;
+ flops++;
+ }
+#elif defined(EQ_MEX)
+ *zrp++ = (*xrp == *yrp);
+#elif defined(NE_MEX)
+ *zrp++ = (*xrp != *yrp);
+#elif defined(LT_MEX)
+ *zrp++ = (*xrp < *yrp);
+#elif defined(GT_MEX)
+ *zrp++ = (*xrp > *yrp);
+#elif defined(LE_MEX)
+ *zrp++ = (*xrp <= *yrp);
+#elif defined(GE_MEX)
+ *zrp++ = (*xrp >= *yrp);
+#endif
+ for (i = 0; i < ndim; i++)
+ {
+ if (subs[i] == s1[i])
+ {
+ subs[i] = 0;
+ if (sx[i])
+ xrp -= cpsx2[i];
+ if (sy[i])
+ yrp -= cpsy2[i];
+ }
+ else
+ {
+ subs[i]++;
+ if (sx[i])
+ xrp += cpsx[i];
+ if (sy[i])
+ yrp += cpsy[i];
+ break;
+ }
+ }
+ }
+#ifdef POWER_MEX
+ mexAddFlops(flops);
+ removeZeroImag(zr, zi, ndim, (const int *)s, numElements, &zMat);
+#elif !defined(RELOP_MEX)
+ mexAddFlops(numElements);
+#endif
+ }
+ else if (!xcmplx && ycmplx) /* X and Y both N-D, X real, Y complex. */
+ {
+#ifdef POWER_MEX
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxCOMPLEX);
+ zrp = zr = mxGetPr(zMat);
+ zip = zi = mxGetPi(zMat);
+#elif defined(RELOP_MEX)
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxREAL);
+ mxSetLogical(zMat);
+ zrp = mxGetPr(zMat);
+#else
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxCOMPLEX);
+ zrp = mxGetPr(zMat);
+ zip = mxGetPi(zMat);
+#endif
+ xrp = mxGetPr(xMat);
+ yrp = mxGetPr(yMat);
+ yip = mxGetPi(yMat);
+ zrend = zrp + numElements;
+ while (zrp < zrend)
+ {
+#ifdef PLUS_MEX
+ *zrp++ = *xrp + *yrp;
+ *zip++ = *yip;
+#elif defined(MINUS_MEX)
+ *zrp++ = *xrp - *yrp;
+ *zip++ = -*yip;
+#elif defined(TIMES_MEX)
+ *zrp++ = *xrp * *yrp;
+ *zip++ = *xrp * *yip;
+#elif defined(RDIVIDE_MEX)
+ denom = *yrp * *yrp + *yip * *yip;
+ *zrp++ = (*xrp * *yrp)/denom;
+ *zip++ = (-*xrp * *yip)/denom;
+#elif defined(LDIVIDE_MEX)
+ *zrp++ = *yrp / *xrp;
+ *zip++ = *yip / *xrp;
+#elif defined(POWER_MEX)
+ if (*yip == 0.0)
+ {
+ if (*xrp < 0.0 && *yrp != floor(*yrp))
+ {
+ mag = pow(-*xrp,*yrp);
+ theta = PI * *yrp;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ *zrp++ = pow(*xrp,*yrp);
+ *zip++ = 0.0;
+ flops++;
+ }
+ }
+ else
+ {
+ if (*xrp < 0.0)
+ {
+ mag = pow(-*xrp,*yrp)*exp(-PI * *yip);
+ theta = *yip * log(-*xrp) + PI * *yrp;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ mag = pow(*xrp,*yrp);
+ theta = *yip * log(*xrp);
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 13;
+ }
+ }
+#elif defined(EQ_MEX)
+ *zrp++ = (*xrp == *yrp) && (*yip == 0.0);
+#elif defined(NE_MEX)
+ *zrp++ = (*xrp != *yrp) || (*yip != 0.0);
+#elif defined(LT_MEX)
+ *zrp++ = (*xrp < *yrp);
+#elif defined(GT_MEX)
+ *zrp++ = (*xrp > *yrp);
+#elif defined(LE_MEX)
+ *zrp++ = (*xrp <= *yrp);
+#elif defined(GE_MEX)
+ *zrp++ = (*xrp >= *yrp);
+#endif
+ for (i = 0; i < ndim; i++)
+ {
+ if (subs[i] == s1[i])
+ {
+ subs[i] = 0;
+ if (sx[i])
+ xrp -= cpsx2[i];
+ if (sy[i])
+ {
+ yrp -= cpsy2[i];
+ yip -= cpsy2[i];
+ }
+ }
+ else
+ {
+ subs[i]++;
+ if (sx[i])
+ xrp += cpsx[i];
+ if (sy[i])
+ {
+ yrp += cpsy[i];
+ yip += cpsy[i];
+ }
+ break;
+ }
+ }
+ }
+#if defined(PLUS_MEX) || defined(MINUS_MEX)
+ mexAddFlops(2*numElements);
+#elif defined(TIMES_MEX) || defined(RDIVIDE_MEX) || defined(LDIVIDE_MEX)
+ mexAddFlops(6*numElements);
+#elif defined(POWER_MEX)
+ mexAddFlops(flops);
+ removeZeroImag(zr, zi, ndim, (const int *)s, numElements, &zMat);
+#endif
+ }
+ else if (xcmplx && !ycmplx) /* X and Y both N-D, X complex, Y real. */
+ {
+#ifdef POWER_MEX
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxCOMPLEX);
+ zrp = zr = mxGetPr(zMat);
+ zip = zi = mxGetPi(zMat);
+#elif defined(RELOP_MEX)
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxREAL);
+ mxSetLogical(zMat);
+ zrp = mxGetPr(zMat);
+#else
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxCOMPLEX);
+ zrp = mxGetPr(zMat);
+ zip = mxGetPi(zMat);
+#endif
+ xrp = mxGetPr(xMat);
+ xip = mxGetPi(xMat);
+ yrp = mxGetPr(yMat);
+ zrend = zrp + numElements;
+ while (zrp < zrend)
+ {
+#ifdef PLUS_MEX
+ *zrp++ = *xrp + *yrp;
+ *zip++ = *xip;
+#elif defined(MINUS_MEX)
+ *zrp++ = *xrp - *yrp;
+ *zip++ = *xip;
+#elif defined(TIMES_MEX)
+ *zrp++ = *xrp * *yrp;
+ *zip++ = *xip * *yrp;
+#elif defined(RDIVIDE_MEX)
+ *zrp++ = *xrp / *yrp;
+ *zip++ = *xip / *yrp;
+#elif defined(LDIVIDE_MEX)
+ denom = *xrp * *xrp + *xip * *xip;
+ *zrp++ = (*xrp * *yrp)/denom;
+ *zip++ = (-*xip * *yrp)/denom;
+#elif defined(POWER_MEX)
+ if (*xip == 0.0)
+ {
+ if (*xrp < 0.0 && *yrp != floor(*yrp))
+ {
+ mag = pow(-*xrp,*yrp);
+ theta = PI * *yrp;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ *zrp++ = pow(*xrp,*yrp);
+ *zip++ = 0.0;
+ flops++;
+ }
+ }
+ else
+ {
+ mag = pow(*xrp * *xrp + *xip * *xip,0.5 * *yrp);
+ theta = *yrp*atan2(*xip,*xrp);
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+#elif defined(EQ_MEX)
+ *zrp++ = (*xrp == *yrp) && (*xip == 0.0);
+#elif defined(NE_MEX)
+ *zrp++ = (*xrp != *yrp) || (*xip != 0.0);
+#elif defined(LT_MEX)
+ *zrp++ = (*xrp < *yrp);
+#elif defined(GT_MEX)
+ *zrp++ = (*xrp > *yrp);
+#elif defined(LE_MEX)
+ *zrp++ = (*xrp <= *yrp);
+#elif defined(GE_MEX)
+ *zrp++ = (*xrp >= *yrp);
+#endif
+ for (i = 0; i < ndim; i++)
+ {
+ if (subs[i] == s1[i])
+ {
+ subs[i] = 0;
+ if (sx[i])
+ {
+ xrp -= cpsx2[i];
+ xip -= cpsx2[i];
+ }
+ if (sy[i])
+ yrp -= cpsy2[i];
+ }
+ else
+ {
+ subs[i]++;
+ if (sx[i])
+ {
+ xrp += cpsx[i];
+ xip += cpsx[i];
+ }
+ if (sy[i])
+ yrp += cpsy[i];
+ break;
+ }
+ }
+ }
+#if defined(PLUS_MEX) || defined(MINUS_MEX)
+ mexAddFlops(2*numElements);
+#elif defined(TIMES_MEX) || defined(RDIVIDE_MEX) || defined(LDIVIDE_MEX)
+ mexAddFlops(6*numElements);
+#elif defined(POWER_MEX)
+ mexAddFlops(flops);
+ removeZeroImag(zr, zi, ndim, (const int *)s, numElements, &zMat);
+#endif
+ }
+ else if (xcmplx && ycmplx) /* X and Y both N-D, both complex. */
+ {
+#if defined(RELOP_MEX)
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxREAL);
+ mxSetLogical(zMat);
+ zrp = mxGetPr(zMat);
+#else
+ zMat = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxCOMPLEX);
+ zrp = zr = mxGetPr(zMat);
+ zip = zi = mxGetPi(zMat);
+#endif
+ xrp = mxGetPr(xMat);
+ xip = mxGetPi(xMat);
+ yrp = mxGetPr(yMat);
+ yip = mxGetPi(yMat);
+ zrend = zrp + numElements;
+ while (zrp < zrend)
+ {
+#ifdef PLUS_MEX
+ *zrp++ = *xrp + *yrp;
+ *zip++ = *xip + *yip;
+#elif defined(MINUS_MEX)
+ *zrp++ = *xrp - *yrp;
+ *zip++ = *xip - *yip;
+#elif defined(TIMES_MEX)
+ *zrp++ = *xrp * *yrp - *xip * *yip;
+ *zip++ = *xip * *yrp + *xrp * *yip;
+#elif defined(RDIVIDE_MEX)
+ denom = *yrp * *yrp + *yip * *yip;
+ *zrp++ = (*xrp * *yrp + *xip * *yip)/denom;
+ *zip++ = (*xip * *yrp - *xrp * *yip)/denom;
+#elif defined(LDIVIDE_MEX)
+ denom = *xrp * *xrp + *xip * *xip;
+ *zrp++ = (*xrp * *yrp + *xip * *yip)/denom;
+ *zip++ = (*xrp * *yip - *xip * *yrp)/denom;
+#elif defined(POWER_MEX)
+ if (*xip == 0.0 && *yip == 0.0)
+ {
+ if (*xrp < 0.0 && *yrp != floor(*yrp))
+ {
+ mag = pow(-*xrp,*yrp);
+ theta = PI * *yrp;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ *zrp++ = pow(*xrp,*yrp);
+ *zip++ = 0.0;
+ flops++;
+ }
+ }
+ else if (*xip == 0.0)
+ {
+ if (*xrp < 0.0)
+ {
+ mag = pow(-*xrp,*yrp)*exp(-PI * *yip);
+ theta = *yip * log(-*xrp) + PI * *yrp;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ mag = pow(*xrp,*yrp);
+ theta = *yip * log(*xrp);
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 13;
+ }
+ }
+ else if (*yip == 0.0)
+ {
+ mag = pow(*xrp * *xrp + *xip * *xip,0.5 * *yrp);
+ theta = *yrp * atan2(*xip,*xrp);
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+ else
+ {
+ magx = sqrt(*xrp * *xrp + *xip * *xip);
+ phi = atan2(*xip,*xrp);
+ mag = pow(magx,*yrp)*exp(-*yip * phi);
+ theta = *yip * log(magx) + *yrp * phi;
+ *zrp++ = mag*cos(theta);
+ *zip++ = mag*sin(theta);
+ flops += 18;
+ }
+#elif defined(EQ_MEX)
+ *zrp++ = (*xrp == *yrp) && (*xip == *yip);
+#elif defined(NE_MEX)
+ *zrp++ = (*xrp != *yrp) || (*xip != *yip);
+#elif defined(LT_MEX)
+ *zrp++ = (*xrp < *yrp);
+#elif defined(GT_MEX)
+ *zrp++ = (*xrp > *yrp);
+#elif defined(LE_MEX)
+ *zrp++ = (*xrp <= *yrp);
+#elif defined(GE_MEX)
+ *zrp++ = (*xrp >= *yrp);
+#endif
+ for (i = 0; i < ndim; i++)
+ {
+ if (subs[i] == s1[i])
+ {
+ subs[i] = 0;
+ if (sx[i])
+ {
+ xrp -= cpsx2[i];
+ xip -= cpsx2[i];
+ }
+ if (sy[i])
+ {
+ yrp -= cpsy2[i];
+ yip -= cpsy2[i];
+ }
+ }
+ else
+ {
+ subs[i]++;
+ if (sx[i])
+ {
+ xrp += cpsx[i];
+ xip += cpsx[i];
+ }
+ if (sy[i])
+ {
+ yrp += cpsy[i];
+ yip += cpsy[i];
+ }
+ break;
+ }
+ }
+ }
+#if defined(PLUS_MEX) || defined(MINUS_MEX)
+ mexAddFlops(2*numElements);
+#elif defined(TIMES_MEX) || defined(RDIVIDE_MEX) || defined(LDIVIDE_MEX)
+ mexAddFlops(6*numElements);
+#elif defined(POWER_MEX)
+ mexAddFlops(flops);
+#endif
+#ifndef RELOP_MEX
+ removeZeroImag(zr, zi, ndim, (const int *)s, numElements, &zMat);
+#endif
+ }
+ }
+}
+
+
+/***********************************************************
+* *
+* Tests to see if the vectors xdim and ydim are equal. *
+* *
+***********************************************************/
+bool allequal(int ndim, const int *xdim, const int *ydim)
+{
+ int i;
+ bool result = true;
+
+ for (i = 0; i < ndim; i++)
+ result = result && (xdim[i] == ydim[i]);
+
+ return(result);
+}
+
+
+/******************************************************************************
+* *
+* Tests to see if every imaginary element is identically zero and, if so, *
+* creates a new array which is real and copies the real elements to it. *
+* *
+******************************************************************************/
+void removeZeroImag(double *zr, double *zi, int ndim, const int *s,
+ int numElements, mxArray *plhs[])
+{
+ double *zrend, *ziend, *zip, *z1p, *z2p;
+ bool allImZero = true;
+ mxArray *temp;
+
+ zip = zi;
+ ziend = zi + numElements;
+ while (zip < ziend)
+ {
+ allImZero = allImZero && (*zip++ == 0.0);
+ if (!allImZero)
+ return;
+ }
+
+ temp = mxCreateNumericArray(ndim, s, mxDOUBLE_CLASS, mxREAL);
+ z1p = zr;
+ z2p = mxGetPr(temp);
+ zrend = z1p + numElements;
+ while (z1p < zrend)
+ *z2p++ = *z1p++;
+ mxDestroyArray(plhs[0]);
+ plhs[0] = temp;
+ return;
+}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/potentials/mk_initial_pot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/potentials/mk_initial_pot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function pot = mk_initial_pot(pot_type, dom, ns, cnodes, onodes)
+% MK_INITIAL_POT A "initial" potential is one which has not had any evidence entered into it.
+% pot = mk_initial_pot(pot_type, domain, node_sizes, cnodes, onodes)
+%
+% pot_type is one of 'd', 'g', 'cg' or 'u'
+% domain is the set of nodes to be included in the potential.
+% node_sizes(i) is the size of node i.
+
+switch pot_type
+ case 'd',
+ ns(onodes) = 1;
+ pot = dpot(dom, ns(dom));
+ case 'u',
+ ns(onodes) = 1;
+ pot = upot(dom, ns(dom));
+ case 'g',
+ ns(onodes) = 0;
+ pot = cpot(dom, ns(dom));
+ case 'cg',
+ dnodes = mysetdiff(1:length(ns), cnodes);
+ ddom = myintersect(dnodes, dom);
+ cdom = myintersect(cnodes, dom);
+ dobs = myintersect(dnodes, onodes);
+ cobs = myintersect(cnodes, onodes);
+ ns(dobs) = 1;
+ ns(cobs) = 0;
+ pot = cgpot(ddom, cdom, ns);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/test_BNT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/test_BNT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,108 @@
+% If all is well, all of these scripts should run without errors.
+
+
+% bnets
+cg1
+cg2
+discrete1
+fa1
+gaussian1
+gaussian2
+if exist('@gibbs_sampling_inf_engine/private/compute_posterior','file')
+ % only exists if installC has been run
+ gibbs_test1
+end
+learn1
+lw1
+mfa1
+mixexp1
+mixexp2
+mixexp3
+mog1
+mpe1
+mpe2
+qmr1
+qmr2
+sample1
+softev1
+softmax1
+sprinkler1
+
+
+% belief propagation
+belprop_polytree_discrete
+belprop_polytree_gauss % alag
+belprop_polytree_cg
+belprop_loop1_discrete
+belprop_loop1_gauss
+belprop_loopy_discrete
+belprop_loopy_gauss
+belprop_loopy_cg % like cg1
+
+
+% factor graphs
+%fg1 failed since marginals were not exact
+
+fg2
+fg3
+fg_mrf1
+fg_mrf2
+
+
+% Structure learning
+bic1
+cooper_yoo
+k2demo1
+mcmc1
+model_select1
+pc1
+%pc2 failed due to numerical problems in KPMstats/cond_indep_fisher_z
+
+
+
+
+% limids
+asia_dt1
+id1
+oil1
+pigs1
+
+
+% dbns
+arhmm1
+bat1
+bkff1
+chmm1
+dhmm1
+filter_test1
+ghmm1
+kalman1
+kjaerulff1
+loopy_dbn1
+mhmm1
+mildew1
+reveal1
+viterbi1
+water1
+
+
+% HHMMs
+abcd_hhmm
+sample_square_hhmm_discrete
+%learn_square_hhmm_cts
+sample_motif_hhmm
+
+%sparse jtree engine & ndx 2TBN engine
+if exist('@jtree_sparse_inf_engine/init_pot','file')
+ % only exists if installC has been run
+ discrete2
+ discrete3
+ filter_test1
+ water2
+end
+
+%find . -path '*.m' -exec wc -l {} \; | ~/count.pl
+
+% we cannot use tic;toc to time test_BNT, since functions within this script
+% reset the tic;toc timer. Hence we use the following:
+%clock0=clock; cpu0 = cputime; test_BNT; cpu=cputime-cpu0; elapsed=etime(clock, clock0)
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bnt/uninstallC_BNT.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bnt/uninstallC_BNT.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+dirs = {'potentials/Tables', ...
+ 'CPDs/@discrete_CPD', ...
+ 'inference/static/@jtree_sparse_inf_engine', ...
+ 'inference/static/@gibbs_sampling_inf_engine/private'};
+
+BNT_HOME = '/home/ai2/murphyk/matlab/FullBNT'; % edit this
+%global BNT_HOME
+
+for d=1:length(dirs)
+ f = fullfile(BNT_HOME, 'BNT', dirs{d});
+ fprintf('removing Cmex files from %s\n', f);
+ cd(f)
+ delete *.mex*
+ delete *.dll
+ delete *.obj
+ delete *.o
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/bntRoot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/bntRoot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4 @@
+function r = bntRoot()
+% Return directory name where bnt is stored
+ r = fileparts(which(mfilename));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/ChangeLog.Sourceforge.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/ChangeLog.Sourceforge.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,4436 @@
+
+
+2007-02-11 17:12 nsaunier
+
+ * BNT/learning/learn_struct_pdag_pc.m: Bug submitted by Imme Ebert-Uphoff (ebert@tree.com) (see Thu Feb 8, 2007 email on the BNT mailing list).
+
+2005-11-26 12:12 yozhik
+
+ * BNT/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m: merged
+ fwdback_twoslice.m to release branch
+
+2005-11-25 17:24 nsaunier
+
+ * BNT/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m: adding
+ old missing fwdback_twoslice.m
+
+2005-11-25 17:24 yozhik
+
+ * BNT/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m: file
+ fwdback_twoslice.m was added on branch release-1_0 on 2005-11-26
+ 20:12:05 +0000
+
+2005-09-25 15:54 yozhik
+
+ * BNT/add_BNT_to_path.m: fix paths
+
+2005-09-25 15:30 yozhik
+
+ * BNT/add_BNT_to_path.m: Restored directories to path.
+
+2005-09-25 15:29 yozhik
+
+ * HMM/fwdback_twoslice.m: added missing fwdback_twoslice
+
+2005-09-17 11:14 yozhik
+
+ * ChangeLog, BNT/add_BNT_to_path.m, BNT/test_BNT.m,
+ BNT/examples/static/cmp_inference_static.m,
+ BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Merged
+ bug fixes from HEAD.
+
+2005-09-17 11:11 yozhik
+
+ * ChangeLog: added change log
+
+2005-09-17 11:11 yozhik
+
+ * ChangeLog: file ChangeLog was added on branch release-1_0 on
+ 2005-09-17 18:14:47 +0000
+
+2005-09-17 10:00 yozhik
+
+ * BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Temporary
+ rollback to fix error, per Kevin.
+
+2005-09-17 09:59 yozhik
+
+ * BNT/examples/static/cmp_inference_static.m: Commented out
+ erroneous line, per Kevin.
+
+2005-09-17 09:58 yozhik
+
+ * BNT/add_BNT_to_path.m: Changed to require BNT_HOME to be
+ predefined.
+
+2005-09-17 09:56 yozhik
+
+ * BNT/test_BNT.m: Commented out problematic tests.
+
+2005-09-17 09:38 yozhik
+
+ * BNT/test_BNT.m: renable tests
+
+2005-09-12 22:18 yozhik
+
+ * KPMtools/pca_kpm.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-09-12 22:18 yozhik
+
+ * KPMtools/pca_kpm.m: Initial revision
+
+2005-08-29 10:44 yozhik
+
+ * graph/: README.txt, Old/best_first_elim_order.m,
+ Old/dag_to_jtree.m, Old/dfs.m, Old/dsep_test.m,
+ Old/mk_2D_lattice_slow.m, acyclic.m, assignEdgeNums.m,
+ best_first_elim_order.m, check_jtree_property.m,
+ check_triangulated.m, children.m, cliques_to_jtree.m,
+ cliques_to_strong_jtree.m, connected_graph.m,
+ dag_to_essential_graph.m, dfs.m, dfs_test.m, dijkstra.m,
+ family.m, graph_separated.m, graph_to_jtree.m,
+ min_subtree_con_nodes.m, minimum_spanning_tree.m, minspan.m,
+ mk_2D_lattice.m, mk_2D_lattice_slow.m, mk_adj_mat.m,
+ mk_adjmat_chain.m, mk_all_dags.m, mk_nbrs_of_dag.m,
+ mk_nbrs_of_digraph.m, mk_nbrs_of_digraph_broken.m,
+ mk_nbrs_of_digraph_not_vectorized.m, mk_rnd_dag.m,
+ mk_rnd_dag_given_edge_prob.m, mk_rooted_tree.m, mk_undirected.m,
+ moralize.m, neighbors.m, parents.m, pred2path.m,
+ reachability_graph.m, scc.m, strong_elim_order.m, test.m,
+ test_strong_root.m, topological_sort.m, trees.txt, triangulate.c,
+ triangulate.m, triangulate_2Dlattice_demo.m, triangulate_test.m:
+ Initial import of code base from Kevin Murphy.
+
+2005-08-29 10:44 yozhik
+
+ * graph/: README.txt, Old/best_first_elim_order.m,
+ Old/dag_to_jtree.m, Old/dfs.m, Old/dsep_test.m,
+ Old/mk_2D_lattice_slow.m, acyclic.m, assignEdgeNums.m,
+ best_first_elim_order.m, check_jtree_property.m,
+ check_triangulated.m, children.m, cliques_to_jtree.m,
+ cliques_to_strong_jtree.m, connected_graph.m,
+ dag_to_essential_graph.m, dfs.m, dfs_test.m, dijkstra.m,
+ family.m, graph_separated.m, graph_to_jtree.m,
+ min_subtree_con_nodes.m, minimum_spanning_tree.m, minspan.m,
+ mk_2D_lattice.m, mk_2D_lattice_slow.m, mk_adj_mat.m,
+ mk_adjmat_chain.m, mk_all_dags.m, mk_nbrs_of_dag.m,
+ mk_nbrs_of_digraph.m, mk_nbrs_of_digraph_broken.m,
+ mk_nbrs_of_digraph_not_vectorized.m, mk_rnd_dag.m,
+ mk_rnd_dag_given_edge_prob.m, mk_rooted_tree.m, mk_undirected.m,
+ moralize.m, neighbors.m, parents.m, pred2path.m,
+ reachability_graph.m, scc.m, strong_elim_order.m, test.m,
+ test_strong_root.m, topological_sort.m, trees.txt, triangulate.c,
+ triangulate.m, triangulate_2Dlattice_demo.m, triangulate_test.m:
+ Initial revision
+
+2005-08-26 18:08 yozhik
+
+ * KPMtools/fullfileKPM.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-08-26 18:08 yozhik
+
+ * KPMtools/fullfileKPM.m: Initial revision
+
+2005-08-21 13:00 yozhik
+
+ *
+ BNT/inference/static/@pearl_inf_engine/private/parallel_protocol.m:
+ Initial import of code base from Kevin Murphy.
+
+2005-08-21 13:00 yozhik
+
+ *
+ BNT/inference/static/@pearl_inf_engine/private/parallel_protocol.m:
+ Initial revision
+
+2005-07-11 12:07 yozhik
+
+ * KPMtools/plotcov2New.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-07-11 12:07 yozhik
+
+ * KPMtools/plotcov2New.m: Initial revision
+
+2005-07-06 12:32 yozhik
+
+ * KPMtools/montageKPM2.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-07-06 12:32 yozhik
+
+ * KPMtools/montageKPM2.m: Initial revision
+
+2005-06-27 18:35 yozhik
+
+ * KPMtools/montageKPM3.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-06-27 18:35 yozhik
+
+ * KPMtools/montageKPM3.m: Initial revision
+
+2005-06-27 18:30 yozhik
+
+ * KPMtools/cell2matPad.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-06-27 18:30 yozhik
+
+ * KPMtools/cell2matPad.m: Initial revision
+
+2005-06-15 14:13 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/gaussian_CPD.m: Initial import of code
+ base from Kevin Murphy.
+
+2005-06-15 14:13 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/gaussian_CPD.m: Initial revision
+
+2005-06-08 18:56 yozhik
+
+ * Kalman/testKalman.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-06-08 18:56 yozhik
+
+ * Kalman/testKalman.m: Initial revision
+
+2005-06-08 18:25 yozhik
+
+ * HMM/: testHMM.m, fixed_lag_smoother_demo.m: Initial import of
+ code base from Kevin Murphy.
+
+2005-06-08 18:25 yozhik
+
+ * HMM/: testHMM.m, fixed_lag_smoother_demo.m: Initial revision
+
+2005-06-08 18:22 yozhik
+
+ * HMM/: README.txt, dhmm_em.m: Initial import of code base from
+ Kevin Murphy.
+
+2005-06-08 18:22 yozhik
+
+ * HMM/: README.txt, dhmm_em.m: Initial revision
+
+2005-06-08 18:17 yozhik
+
+ * HMM/fwdback.m: Initial import of code base from Kevin Murphy.
+
+2005-06-08 18:17 yozhik
+
+ * HMM/fwdback.m: Initial revision
+
+2005-06-05 11:46 yozhik
+
+ * KPMtools/: rectintLoopC.c, rectintLoopC.dll: Initial import of
+ code base from Kevin Murphy.
+
+2005-06-05 11:46 yozhik
+
+ * KPMtools/: rectintLoopC.c, rectintLoopC.dll: Initial revision
+
+2005-06-01 12:39 yozhik
+
+ * KPMtools/montageKPM.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-06-01 12:39 yozhik
+
+ * KPMtools/montageKPM.m: Initial revision
+
+2005-05-31 21:49 yozhik
+
+ * KPMtools/initFigures.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-31 21:49 yozhik
+
+ * KPMtools/initFigures.m: Initial revision
+
+2005-05-31 11:19 yozhik
+
+ * KPMstats/unidrndKPM.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-31 11:19 yozhik
+
+ * KPMstats/unidrndKPM.m: Initial revision
+
+2005-05-30 15:08 yozhik
+
+ * KPMtools/filepartsLast.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-30 15:08 yozhik
+
+ * KPMtools/filepartsLast.m: Initial revision
+
+2005-05-29 23:01 yozhik
+
+ * KPMtools/plotBox.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-29 23:01 yozhik
+
+ * KPMtools/plotBox.m: Initial revision
+
+2005-05-25 18:31 yozhik
+
+ * KPMtools/plotColors.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-25 18:31 yozhik
+
+ * KPMtools/plotColors.m: Initial revision
+
+2005-05-25 12:11 yozhik
+
+ * KPMtools/genpathKPM.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-25 12:11 yozhik
+
+ * KPMtools/genpathKPM.m: Initial revision
+
+2005-05-23 17:03 yozhik
+
+ * netlab3.3/demhmc1.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-23 17:03 yozhik
+
+ * netlab3.3/demhmc1.m: Initial revision
+
+2005-05-23 16:44 yozhik
+
+ * netlab3.3/gmminit.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-23 16:44 yozhik
+
+ * netlab3.3/gmminit.m: Initial revision
+
+2005-05-23 16:07 yozhik
+
+ * netlab3.3/metrop.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-23 16:07 yozhik
+
+ * netlab3.3/metrop.m: Initial revision
+
+2005-05-22 23:23 yozhik
+
+ * netlab3.3/demmet1.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-22 23:23 yozhik
+
+ * netlab3.3/demmet1.m: Initial revision
+
+2005-05-22 16:32 yozhik
+
+ * KPMstats/: dirichletrnd.m, dirichletpdf.m, test_dir.m,
+ multirnd.m, multipdf.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-22 16:32 yozhik
+
+ * KPMstats/: dirichletrnd.m, dirichletpdf.m, test_dir.m,
+ multirnd.m, multipdf.m: Initial revision
+
+2005-05-13 13:52 yozhik
+
+ * KPMtools/: asort.m, dirKPM.m: Initial import of code base from
+ Kevin Murphy.
+
+2005-05-13 13:52 yozhik
+
+ * KPMtools/: asort.m, dirKPM.m: Initial revision
+
+2005-05-09 18:32 yozhik
+
+ * netlab3.3/dem2ddat.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-09 18:32 yozhik
+
+ * netlab3.3/dem2ddat.m: Initial revision
+
+2005-05-09 15:20 yozhik
+
+ * KPMtools/: mkdirKPM.m, optimalMatching.m, optimalMatchingTest.m,
+ subsets1.m: Initial import of code base from Kevin Murphy.
+
+2005-05-09 15:20 yozhik
+
+ * KPMtools/: mkdirKPM.m, optimalMatching.m, optimalMatchingTest.m,
+ subsets1.m: Initial revision
+
+2005-05-09 09:47 yozhik
+
+ * KPMtools/bipartiteMatchingDemo.m: Initial import of code base
+ from Kevin Murphy.
+
+2005-05-09 09:47 yozhik
+
+ * KPMtools/bipartiteMatchingDemo.m: Initial revision
+
+2005-05-08 22:25 yozhik
+
+ * KPMtools/bipartiteMatchingIntProg.m: Initial import of code base
+ from Kevin Murphy.
+
+2005-05-08 22:25 yozhik
+
+ * KPMtools/bipartiteMatchingIntProg.m: Initial revision
+
+2005-05-08 21:45 yozhik
+
+ * KPMtools/bipartiteMatchingDemoPlot.m: Initial import of code base
+ from Kevin Murphy.
+
+2005-05-08 21:45 yozhik
+
+ * KPMtools/bipartiteMatchingDemoPlot.m: Initial revision
+
+2005-05-08 19:55 yozhik
+
+ * KPMtools/subsetsFixedSize.m: Initial import of code base from
+ Kevin Murphy.
+
+2005-05-08 19:55 yozhik
+
+ * KPMtools/subsetsFixedSize.m: Initial revision
+
+2005-05-08 15:48 yozhik
+
+ * KPMtools/centeringMatrix.m: Initial import of code base from
+ Kevin Murphy.
+
+2005-05-08 15:48 yozhik
+
+ * KPMtools/centeringMatrix.m: Initial revision
+
+2005-05-08 10:51 yozhik
+
+ * netlab3.3/demgmm1.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-08 10:51 yozhik
+
+ * netlab3.3/demgmm1.m: Initial revision
+
+2005-05-06 18:09 yozhik
+
+ * BNT/add_BNT_to_path.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-06 18:09 yozhik
+
+ * BNT/add_BNT_to_path.m: Initial revision
+
+2005-05-03 21:35 yozhik
+
+ * KPMstats/standardize.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-03 21:35 yozhik
+
+ * KPMstats/standardize.m: Initial revision
+
+2005-05-03 13:18 yozhik
+
+ * KPMstats/histCmpChi2.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-03 13:18 yozhik
+
+ * KPMstats/histCmpChi2.m: Initial revision
+
+2005-05-03 12:01 yozhik
+
+ * KPMtools/strsplit.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-05-03 12:01 yozhik
+
+ * KPMtools/strsplit.m: Initial revision
+
+2005-05-02 13:19 yozhik
+
+ * KPMtools/hsvKPM.m: Initial import of code base from Kevin Murphy.
+
+2005-05-02 13:19 yozhik
+
+ * KPMtools/hsvKPM.m: Initial revision
+
+2005-04-27 11:34 yozhik
+
+ * BNT/potentials/@dpot/: subsasgn.m, subsref.m: Initial import of
+ code base from Kevin Murphy.
+
+2005-04-27 11:34 yozhik
+
+ * BNT/potentials/@dpot/: subsasgn.m, subsref.m: Initial revision
+
+2005-04-27 10:58 yozhik
+
+ * KPMtools/mahal2conf.m, nethelp3.3/conffig.htm,
+ nethelp3.3/confmat.htm, nethelp3.3/conjgrad.htm,
+ nethelp3.3/consist.htm, nethelp3.3/convertoldnet.htm,
+ nethelp3.3/datread.htm, nethelp3.3/datwrite.htm,
+ nethelp3.3/dem2ddat.htm, nethelp3.3/demard.htm,
+ nethelp3.3/demev1.htm, nethelp3.3/demev2.htm,
+ nethelp3.3/demev3.htm, nethelp3.3/demgauss.htm,
+ nethelp3.3/demglm1.htm, nethelp3.3/demglm2.htm,
+ nethelp3.3/demgmm1.htm, nethelp3.3/demgmm2.htm,
+ nethelp3.3/demgmm3.htm, nethelp3.3/demgmm4.htm,
+ nethelp3.3/demgmm5.htm, nethelp3.3/demgp.htm,
+ nethelp3.3/demgpard.htm, nethelp3.3/demgpot.htm,
+ nethelp3.3/demgtm1.htm, nethelp3.3/demgtm2.htm,
+ nethelp3.3/demhint.htm, nethelp3.3/demhmc1.htm,
+ nethelp3.3/demhmc2.htm, nethelp3.3/demhmc3.htm,
+ nethelp3.3/demkmn1.htm, nethelp3.3/demknn1.htm,
+ nethelp3.3/demmdn1.htm, nethelp3.3/demmet1.htm,
+ nethelp3.3/demmlp1.htm, nethelp3.3/demmlp2.htm,
+ nethelp3.3/demnlab.htm, nethelp3.3/demns1.htm,
+ nethelp3.3/demolgd1.htm, nethelp3.3/demopt1.htm,
+ nethelp3.3/dempot.htm, nethelp3.3/demprgp.htm,
+ nethelp3.3/demprior.htm, nethelp3.3/demrbf1.htm,
+ nethelp3.3/demsom1.htm, nethelp3.3/demtrain.htm,
+ nethelp3.3/dist2.htm, nethelp3.3/eigdec.htm,
+ nethelp3.3/errbayes.htm, nethelp3.3/evidence.htm,
+ nethelp3.3/fevbayes.htm, nethelp3.3/gauss.htm,
+ nethelp3.3/gbayes.htm, nethelp3.3/glm.htm,
+ nethelp3.3/glmderiv.htm, nethelp3.3/glmerr.htm,
+ nethelp3.3/glmevfwd.htm, nethelp3.3/glmfwd.htm,
+ nethelp3.3/glmgrad.htm, nethelp3.3/glmhess.htm,
+ nethelp3.3/glminit.htm, nethelp3.3/glmpak.htm,
+ nethelp3.3/glmtrain.htm, nethelp3.3/glmunpak.htm,
+ nethelp3.3/gmm.htm, nethelp3.3/gmmactiv.htm,
+ nethelp3.3/gmmem.htm, nethelp3.3/gmminit.htm,
+ nethelp3.3/gmmpak.htm, nethelp3.3/gmmpost.htm,
+ nethelp3.3/gmmprob.htm, nethelp3.3/gmmsamp.htm,
+ nethelp3.3/gmmunpak.htm, nethelp3.3/gp.htm,
+ nethelp3.3/gpcovar.htm, nethelp3.3/gpcovarf.htm,
+ nethelp3.3/gpcovarp.htm, nethelp3.3/gperr.htm,
+ nethelp3.3/gpfwd.htm, nethelp3.3/gpgrad.htm,
+ nethelp3.3/gpinit.htm, nethelp3.3/gppak.htm,
+ nethelp3.3/gpunpak.htm, nethelp3.3/gradchek.htm,
+ nethelp3.3/graddesc.htm, nethelp3.3/gsamp.htm,
+ nethelp3.3/gtm.htm, nethelp3.3/gtmem.htm, nethelp3.3/gtmfwd.htm,
+ nethelp3.3/gtminit.htm, nethelp3.3/gtmlmean.htm,
+ nethelp3.3/gtmlmode.htm, nethelp3.3/gtmmag.htm,
+ nethelp3.3/gtmpost.htm, nethelp3.3/gtmprob.htm,
+ nethelp3.3/hbayes.htm, nethelp3.3/hesschek.htm,
+ nethelp3.3/hintmat.htm, nethelp3.3/hinton.htm,
+ nethelp3.3/histp.htm, nethelp3.3/hmc.htm, nethelp3.3/index.htm,
+ nethelp3.3/kmeans.htm, nethelp3.3/knn.htm, nethelp3.3/knnfwd.htm,
+ nethelp3.3/linef.htm, nethelp3.3/linemin.htm,
+ nethelp3.3/maxitmess.htm, nethelp3.3/mdn.htm,
+ nethelp3.3/mdn2gmm.htm, nethelp3.3/mdndist2.htm,
+ nethelp3.3/mdnerr.htm, nethelp3.3/mdnfwd.htm,
+ nethelp3.3/mdngrad.htm, nethelp3.3/mdninit.htm,
+ nethelp3.3/mdnpak.htm, nethelp3.3/mdnpost.htm,
+ nethelp3.3/mdnprob.htm, nethelp3.3/mdnunpak.htm,
+ nethelp3.3/metrop.htm, nethelp3.3/minbrack.htm,
+ nethelp3.3/mlp.htm, nethelp3.3/mlpbkp.htm,
+ nethelp3.3/mlpderiv.htm, nethelp3.3/mlperr.htm,
+ nethelp3.3/mlpevfwd.htm, nethelp3.3/mlpfwd.htm,
+ nethelp3.3/mlpgrad.htm, nethelp3.3/mlphdotv.htm,
+ nethelp3.3/mlphess.htm, nethelp3.3/mlphint.htm,
+ nethelp3.3/mlpinit.htm, nethelp3.3/mlppak.htm,
+ nethelp3.3/mlpprior.htm, nethelp3.3/mlptrain.htm,
+ nethelp3.3/mlpunpak.htm, nethelp3.3/netderiv.htm,
+ nethelp3.3/neterr.htm, nethelp3.3/netevfwd.htm,
+ nethelp3.3/netgrad.htm, nethelp3.3/nethelp3.3.zip,
+ nethelp3.3/nethess.htm, nethelp3.3/netinit.htm,
+ nethelp3.3/netopt.htm, nethelp3.3/netpak.htm,
+ nethelp3.3/netunpak.htm, nethelp3.3/olgd.htm, nethelp3.3/pca.htm,
+ nethelp3.3/plotmat.htm, nethelp3.3/ppca.htm,
+ nethelp3.3/quasinew.htm, nethelp3.3/rbf.htm,
+ nethelp3.3/rbfbkp.htm, nethelp3.3/rbfderiv.htm,
+ nethelp3.3/rbferr.htm, nethelp3.3/rbfevfwd.htm,
+ nethelp3.3/rbffwd.htm, nethelp3.3/rbfgrad.htm,
+ nethelp3.3/rbfhess.htm, nethelp3.3/rbfjacob.htm,
+ nethelp3.3/rbfpak.htm, nethelp3.3/rbfprior.htm,
+ nethelp3.3/rbfsetbf.htm, nethelp3.3/rbfsetfw.htm,
+ nethelp3.3/rbftrain.htm, nethelp3.3/rbfunpak.htm,
+ nethelp3.3/rosegrad.htm, nethelp3.3/rosen.htm,
+ nethelp3.3/scg.htm, nethelp3.3/som.htm, nethelp3.3/somfwd.htm,
+ nethelp3.3/sompak.htm, nethelp3.3/somtrain.htm,
+ nethelp3.3/somunpak.htm, netlab3.3/Contents.m, netlab3.3/LICENSE,
+ netlab3.3/conffig.m, netlab3.3/confmat.m, netlab3.3/conjgrad.m,
+ netlab3.3/consist.m, netlab3.3/convertoldnet.m,
+ netlab3.3/datread.m, netlab3.3/datwrite.m, netlab3.3/demard.m,
+ netlab3.3/demev1.m, netlab3.3/demev2.m, netlab3.3/demev3.m,
+ netlab3.3/demgauss.m, netlab3.3/demglm1.m, netlab3.3/demglm2.m,
+ netlab3.3/demgmm2.m, netlab3.3/demgmm3.m, netlab3.3/demgmm4.m,
+ netlab3.3/demgmm5.m, netlab3.3/demgp.m, netlab3.3/demgpard.m,
+ netlab3.3/demgpot.m, netlab3.3/demgtm1.m, netlab3.3/demgtm2.m,
+ netlab3.3/demhint.m, netlab3.3/demhmc2.m, netlab3.3/demhmc3.m,
+ netlab3.3/demkmn1.m, netlab3.3/demknn1.m, netlab3.3/demmdn1.m,
+ netlab3.3/demmlp1.m, netlab3.3/demmlp2.m, netlab3.3/demnlab.m,
+ netlab3.3/demns1.m, netlab3.3/demolgd1.m, netlab3.3/demopt1.m,
+ netlab3.3/dempot.m, netlab3.3/demprgp.m, netlab3.3/demprior.m,
+ netlab3.3/demrbf1.m, netlab3.3/demsom1.m, netlab3.3/demtrain.m,
+ netlab3.3/dist2.m, netlab3.3/eigdec.m, netlab3.3/errbayes.m,
+ netlab3.3/evidence.m, netlab3.3/fevbayes.m, netlab3.3/gauss.m,
+ netlab3.3/gbayes.m, netlab3.3/glm.m, netlab3.3/glmderiv.m,
+ netlab3.3/glmerr.m, netlab3.3/glmevfwd.m, netlab3.3/glmfwd.m,
+ netlab3.3/glmgrad.m, netlab3.3/glmhess.m, netlab3.3/glminit.m,
+ netlab3.3/glmpak.m, netlab3.3/glmtrain.m, netlab3.3/glmunpak.m,
+ netlab3.3/gmm.m, netlab3.3/gmmactiv.m, netlab3.3/gmmem.m,
+ netlab3.3/gmmpak.m, netlab3.3/gmmpost.m, netlab3.3/gmmprob.m,
+ netlab3.3/gmmsamp.m, netlab3.3/gmmunpak.m, netlab3.3/gp.m,
+ netlab3.3/gpcovar.m, netlab3.3/gpcovarf.m, netlab3.3/gpcovarp.m,
+ netlab3.3/gperr.m, netlab3.3/gpfwd.m, netlab3.3/gpgrad.m,
+ netlab3.3/gpinit.m, netlab3.3/gppak.m, netlab3.3/gpunpak.m,
+ netlab3.3/gradchek.m, netlab3.3/graddesc.m, netlab3.3/gsamp.m,
+ netlab3.3/gtm.m, netlab3.3/gtmem.m, netlab3.3/gtmfwd.m,
+ netlab3.3/gtminit.m, netlab3.3/gtmlmean.m, netlab3.3/gtmlmode.m,
+ netlab3.3/gtmmag.m, netlab3.3/gtmpost.m, netlab3.3/gtmprob.m,
+ netlab3.3/hbayes.m, netlab3.3/hesschek.m, netlab3.3/hintmat.m,
+ netlab3.3/hinton.m, netlab3.3/histp.m, netlab3.3/hmc.m,
+ netlab3.3/kmeansNetlab.m, netlab3.3/knn.m, netlab3.3/knnfwd.m,
+ netlab3.3/linef.m, netlab3.3/linemin.m, netlab3.3/maxitmess.m,
+ netlab3.3/mdn.m, netlab3.3/mdn2gmm.m, netlab3.3/mdndist2.m,
+ netlab3.3/mdnerr.m, netlab3.3/mdnfwd.m, netlab3.3/mdngrad.m,
+ netlab3.3/mdninit.m, netlab3.3/mdnnet.mat, netlab3.3/mdnpak.m,
+ netlab3.3/mdnpost.m, netlab3.3/mdnprob.m, netlab3.3/mdnunpak.m,
+ netlab3.3/minbrack.m, netlab3.3/mlp.m, netlab3.3/mlpbkp.m,
+ netlab3.3/mlpderiv.m, netlab3.3/mlperr.m, netlab3.3/mlpevfwd.m,
+ netlab3.3/mlpfwd.m, netlab3.3/mlpgrad.m, netlab3.3/mlphdotv.m,
+ netlab3.3/mlphess.m, netlab3.3/mlphint.m, netlab3.3/mlpinit.m,
+ netlab3.3/mlppak.m, netlab3.3/mlpprior.m, netlab3.3/mlptrain.m,
+ netlab3.3/mlpunpak.m, netlab3.3/netderiv.m, netlab3.3/neterr.m,
+ netlab3.3/netevfwd.m, netlab3.3/netgrad.m, netlab3.3/nethess.m,
+ netlab3.3/netinit.m, netlab3.3/netlab3.3.zip,
+ netlab3.3/netlogo.mat, netlab3.3/netopt.m, netlab3.3/netpak.m,
+ netlab3.3/netunpak.m, netlab3.3/oilTrn.dat, netlab3.3/oilTst.dat,
+ netlab3.3/olgd.m, netlab3.3/pca.m, netlab3.3/plotmat.m,
+ netlab3.3/ppca.m, netlab3.3/quasinew.m, netlab3.3/rbf.m,
+ netlab3.3/rbfbkp.m, netlab3.3/rbfderiv.m, netlab3.3/rbferr.m,
+ netlab3.3/rbfevfwd.m, netlab3.3/rbffwd.m, netlab3.3/rbfgrad.m,
+ netlab3.3/rbfhess.m, netlab3.3/rbfjacob.m, netlab3.3/rbfpak.m,
+ netlab3.3/rbfprior.m, netlab3.3/rbfsetbf.m, netlab3.3/rbfsetfw.m,
+ netlab3.3/rbftrain.m, netlab3.3/rbfunpak.m, netlab3.3/rosegrad.m,
+ netlab3.3/rosen.m, netlab3.3/scg.m, netlab3.3/som.m,
+ netlab3.3/somfwd.m, netlab3.3/sompak.m, netlab3.3/somtrain.m,
+ netlab3.3/somunpak.m, netlab3.3/xor.dat, netlabKPM/README.txt,
+ netlabKPM/demgmm1_movie.m, netlabKPM/evidence_weighted.m,
+ netlabKPM/glmerr_weighted.m, netlabKPM/glmgrad_weighted.m,
+ netlabKPM/glmhess_weighted.m, netlabKPM/glmtrain_weighted.m,
+ netlabKPM/gmm1.avi, netlabKPM/gmmem2.m,
+ netlabKPM/gmmem_multi_restart.m, netlabKPM/kmeans_demo.m,
+ netlabKPM/mlperr_weighted.m, netlabKPM/mlpgrad_weighted.m,
+ netlabKPM/mlphdotv_weighted.m, netlabKPM/mlphess_weighted.m,
+ netlabKPM/neterr_weighted.m, netlabKPM/netgrad_weighted.m,
+ netlabKPM/nethess_weighted.m, netlabKPM/netopt_weighted.m,
+ netlabKPM/process_options.m: Initial import of code base from
+ Kevin Murphy.
+
+2005-04-27 10:58 yozhik
+
+ * KPMtools/mahal2conf.m, nethelp3.3/conffig.htm,
+ nethelp3.3/confmat.htm, nethelp3.3/conjgrad.htm,
+ nethelp3.3/consist.htm, nethelp3.3/convertoldnet.htm,
+ nethelp3.3/datread.htm, nethelp3.3/datwrite.htm,
+ nethelp3.3/dem2ddat.htm, nethelp3.3/demard.htm,
+ nethelp3.3/demev1.htm, nethelp3.3/demev2.htm,
+ nethelp3.3/demev3.htm, nethelp3.3/demgauss.htm,
+ nethelp3.3/demglm1.htm, nethelp3.3/demglm2.htm,
+ nethelp3.3/demgmm1.htm, nethelp3.3/demgmm2.htm,
+ nethelp3.3/demgmm3.htm, nethelp3.3/demgmm4.htm,
+ nethelp3.3/demgmm5.htm, nethelp3.3/demgp.htm,
+ nethelp3.3/demgpard.htm, nethelp3.3/demgpot.htm,
+ nethelp3.3/demgtm1.htm, nethelp3.3/demgtm2.htm,
+ nethelp3.3/demhint.htm, nethelp3.3/demhmc1.htm,
+ nethelp3.3/demhmc2.htm, nethelp3.3/demhmc3.htm,
+ nethelp3.3/demkmn1.htm, nethelp3.3/demknn1.htm,
+ nethelp3.3/demmdn1.htm, nethelp3.3/demmet1.htm,
+ nethelp3.3/demmlp1.htm, nethelp3.3/demmlp2.htm,
+ nethelp3.3/demnlab.htm, nethelp3.3/demns1.htm,
+ nethelp3.3/demolgd1.htm, nethelp3.3/demopt1.htm,
+ nethelp3.3/dempot.htm, nethelp3.3/demprgp.htm,
+ nethelp3.3/demprior.htm, nethelp3.3/demrbf1.htm,
+ nethelp3.3/demsom1.htm, nethelp3.3/demtrain.htm,
+ nethelp3.3/dist2.htm, nethelp3.3/eigdec.htm,
+ nethelp3.3/errbayes.htm, nethelp3.3/evidence.htm,
+ nethelp3.3/fevbayes.htm, nethelp3.3/gauss.htm,
+ nethelp3.3/gbayes.htm, nethelp3.3/glm.htm,
+ nethelp3.3/glmderiv.htm, nethelp3.3/glmerr.htm,
+ nethelp3.3/glmevfwd.htm, nethelp3.3/glmfwd.htm,
+ nethelp3.3/glmgrad.htm, nethelp3.3/glmhess.htm,
+ nethelp3.3/glminit.htm, nethelp3.3/glmpak.htm,
+ nethelp3.3/glmtrain.htm, nethelp3.3/glmunpak.htm,
+ nethelp3.3/gmm.htm, nethelp3.3/gmmactiv.htm,
+ nethelp3.3/gmmem.htm, nethelp3.3/gmminit.htm,
+ nethelp3.3/gmmpak.htm, nethelp3.3/gmmpost.htm,
+ nethelp3.3/gmmprob.htm, nethelp3.3/gmmsamp.htm,
+ nethelp3.3/gmmunpak.htm, nethelp3.3/gp.htm,
+ nethelp3.3/gpcovar.htm, nethelp3.3/gpcovarf.htm,
+ nethelp3.3/gpcovarp.htm, nethelp3.3/gperr.htm,
+ nethelp3.3/gpfwd.htm, nethelp3.3/gpgrad.htm,
+ nethelp3.3/gpinit.htm, nethelp3.3/gppak.htm,
+ nethelp3.3/gpunpak.htm, nethelp3.3/gradchek.htm,
+ nethelp3.3/graddesc.htm, nethelp3.3/gsamp.htm,
+ nethelp3.3/gtm.htm, nethelp3.3/gtmem.htm, nethelp3.3/gtmfwd.htm,
+ nethelp3.3/gtminit.htm, nethelp3.3/gtmlmean.htm,
+ nethelp3.3/gtmlmode.htm, nethelp3.3/gtmmag.htm,
+ nethelp3.3/gtmpost.htm, nethelp3.3/gtmprob.htm,
+ nethelp3.3/hbayes.htm, nethelp3.3/hesschek.htm,
+ nethelp3.3/hintmat.htm, nethelp3.3/hinton.htm,
+ nethelp3.3/histp.htm, nethelp3.3/hmc.htm, nethelp3.3/index.htm,
+ nethelp3.3/kmeans.htm, nethelp3.3/knn.htm, nethelp3.3/knnfwd.htm,
+ nethelp3.3/linef.htm, nethelp3.3/linemin.htm,
+ nethelp3.3/maxitmess.htm, nethelp3.3/mdn.htm,
+ nethelp3.3/mdn2gmm.htm, nethelp3.3/mdndist2.htm,
+ nethelp3.3/mdnerr.htm, nethelp3.3/mdnfwd.htm,
+ nethelp3.3/mdngrad.htm, nethelp3.3/mdninit.htm,
+ nethelp3.3/mdnpak.htm, nethelp3.3/mdnpost.htm,
+ nethelp3.3/mdnprob.htm, nethelp3.3/mdnunpak.htm,
+ nethelp3.3/metrop.htm, nethelp3.3/minbrack.htm,
+ nethelp3.3/mlp.htm, nethelp3.3/mlpbkp.htm,
+ nethelp3.3/mlpderiv.htm, nethelp3.3/mlperr.htm,
+ nethelp3.3/mlpevfwd.htm, nethelp3.3/mlpfwd.htm,
+ nethelp3.3/mlpgrad.htm, nethelp3.3/mlphdotv.htm,
+ nethelp3.3/mlphess.htm, nethelp3.3/mlphint.htm,
+ nethelp3.3/mlpinit.htm, nethelp3.3/mlppak.htm,
+ nethelp3.3/mlpprior.htm, nethelp3.3/mlptrain.htm,
+ nethelp3.3/mlpunpak.htm, nethelp3.3/netderiv.htm,
+ nethelp3.3/neterr.htm, nethelp3.3/netevfwd.htm,
+ nethelp3.3/netgrad.htm, nethelp3.3/nethelp3.3.zip,
+ nethelp3.3/nethess.htm, nethelp3.3/netinit.htm,
+ nethelp3.3/netopt.htm, nethelp3.3/netpak.htm,
+ nethelp3.3/netunpak.htm, nethelp3.3/olgd.htm, nethelp3.3/pca.htm,
+ nethelp3.3/plotmat.htm, nethelp3.3/ppca.htm,
+ nethelp3.3/quasinew.htm, nethelp3.3/rbf.htm,
+ nethelp3.3/rbfbkp.htm, nethelp3.3/rbfderiv.htm,
+ nethelp3.3/rbferr.htm, nethelp3.3/rbfevfwd.htm,
+ nethelp3.3/rbffwd.htm, nethelp3.3/rbfgrad.htm,
+ nethelp3.3/rbfhess.htm, nethelp3.3/rbfjacob.htm,
+ nethelp3.3/rbfpak.htm, nethelp3.3/rbfprior.htm,
+ nethelp3.3/rbfsetbf.htm, nethelp3.3/rbfsetfw.htm,
+ nethelp3.3/rbftrain.htm, nethelp3.3/rbfunpak.htm,
+ nethelp3.3/rosegrad.htm, nethelp3.3/rosen.htm,
+ nethelp3.3/scg.htm, nethelp3.3/som.htm, nethelp3.3/somfwd.htm,
+ nethelp3.3/sompak.htm, nethelp3.3/somtrain.htm,
+ nethelp3.3/somunpak.htm, netlab3.3/Contents.m, netlab3.3/LICENSE,
+ netlab3.3/conffig.m, netlab3.3/confmat.m, netlab3.3/conjgrad.m,
+ netlab3.3/consist.m, netlab3.3/convertoldnet.m,
+ netlab3.3/datread.m, netlab3.3/datwrite.m, netlab3.3/demard.m,
+ netlab3.3/demev1.m, netlab3.3/demev2.m, netlab3.3/demev3.m,
+ netlab3.3/demgauss.m, netlab3.3/demglm1.m, netlab3.3/demglm2.m,
+ netlab3.3/demgmm2.m, netlab3.3/demgmm3.m, netlab3.3/demgmm4.m,
+ netlab3.3/demgmm5.m, netlab3.3/demgp.m, netlab3.3/demgpard.m,
+ netlab3.3/demgpot.m, netlab3.3/demgtm1.m, netlab3.3/demgtm2.m,
+ netlab3.3/demhint.m, netlab3.3/demhmc2.m, netlab3.3/demhmc3.m,
+ netlab3.3/demkmn1.m, netlab3.3/demknn1.m, netlab3.3/demmdn1.m,
+ netlab3.3/demmlp1.m, netlab3.3/demmlp2.m, netlab3.3/demnlab.m,
+ netlab3.3/demns1.m, netlab3.3/demolgd1.m, netlab3.3/demopt1.m,
+ netlab3.3/dempot.m, netlab3.3/demprgp.m, netlab3.3/demprior.m,
+ netlab3.3/demrbf1.m, netlab3.3/demsom1.m, netlab3.3/demtrain.m,
+ netlab3.3/dist2.m, netlab3.3/eigdec.m, netlab3.3/errbayes.m,
+ netlab3.3/evidence.m, netlab3.3/fevbayes.m, netlab3.3/gauss.m,
+ netlab3.3/gbayes.m, netlab3.3/glm.m, netlab3.3/glmderiv.m,
+ netlab3.3/glmerr.m, netlab3.3/glmevfwd.m, netlab3.3/glmfwd.m,
+ netlab3.3/glmgrad.m, netlab3.3/glmhess.m, netlab3.3/glminit.m,
+ netlab3.3/glmpak.m, netlab3.3/glmtrain.m, netlab3.3/glmunpak.m,
+ netlab3.3/gmm.m, netlab3.3/gmmactiv.m, netlab3.3/gmmem.m,
+ netlab3.3/gmmpak.m, netlab3.3/gmmpost.m, netlab3.3/gmmprob.m,
+ netlab3.3/gmmsamp.m, netlab3.3/gmmunpak.m, netlab3.3/gp.m,
+ netlab3.3/gpcovar.m, netlab3.3/gpcovarf.m, netlab3.3/gpcovarp.m,
+ netlab3.3/gperr.m, netlab3.3/gpfwd.m, netlab3.3/gpgrad.m,
+ netlab3.3/gpinit.m, netlab3.3/gppak.m, netlab3.3/gpunpak.m,
+ netlab3.3/gradchek.m, netlab3.3/graddesc.m, netlab3.3/gsamp.m,
+ netlab3.3/gtm.m, netlab3.3/gtmem.m, netlab3.3/gtmfwd.m,
+ netlab3.3/gtminit.m, netlab3.3/gtmlmean.m, netlab3.3/gtmlmode.m,
+ netlab3.3/gtmmag.m, netlab3.3/gtmpost.m, netlab3.3/gtmprob.m,
+ netlab3.3/hbayes.m, netlab3.3/hesschek.m, netlab3.3/hintmat.m,
+ netlab3.3/hinton.m, netlab3.3/histp.m, netlab3.3/hmc.m,
+ netlab3.3/kmeansNetlab.m, netlab3.3/knn.m, netlab3.3/knnfwd.m,
+ netlab3.3/linef.m, netlab3.3/linemin.m, netlab3.3/maxitmess.m,
+ netlab3.3/mdn.m, netlab3.3/mdn2gmm.m, netlab3.3/mdndist2.m,
+ netlab3.3/mdnerr.m, netlab3.3/mdnfwd.m, netlab3.3/mdngrad.m,
+ netlab3.3/mdninit.m, netlab3.3/mdnnet.mat, netlab3.3/mdnpak.m,
+ netlab3.3/mdnpost.m, netlab3.3/mdnprob.m, netlab3.3/mdnunpak.m,
+ netlab3.3/minbrack.m, netlab3.3/mlp.m, netlab3.3/mlpbkp.m,
+ netlab3.3/mlpderiv.m, netlab3.3/mlperr.m, netlab3.3/mlpevfwd.m,
+ netlab3.3/mlpfwd.m, netlab3.3/mlpgrad.m, netlab3.3/mlphdotv.m,
+ netlab3.3/mlphess.m, netlab3.3/mlphint.m, netlab3.3/mlpinit.m,
+ netlab3.3/mlppak.m, netlab3.3/mlpprior.m, netlab3.3/mlptrain.m,
+ netlab3.3/mlpunpak.m, netlab3.3/netderiv.m, netlab3.3/neterr.m,
+ netlab3.3/netevfwd.m, netlab3.3/netgrad.m, netlab3.3/nethess.m,
+ netlab3.3/netinit.m, netlab3.3/netlab3.3.zip,
+ netlab3.3/netlogo.mat, netlab3.3/netopt.m, netlab3.3/netpak.m,
+ netlab3.3/netunpak.m, netlab3.3/oilTrn.dat, netlab3.3/oilTst.dat,
+ netlab3.3/olgd.m, netlab3.3/pca.m, netlab3.3/plotmat.m,
+ netlab3.3/ppca.m, netlab3.3/quasinew.m, netlab3.3/rbf.m,
+ netlab3.3/rbfbkp.m, netlab3.3/rbfderiv.m, netlab3.3/rbferr.m,
+ netlab3.3/rbfevfwd.m, netlab3.3/rbffwd.m, netlab3.3/rbfgrad.m,
+ netlab3.3/rbfhess.m, netlab3.3/rbfjacob.m, netlab3.3/rbfpak.m,
+ netlab3.3/rbfprior.m, netlab3.3/rbfsetbf.m, netlab3.3/rbfsetfw.m,
+ netlab3.3/rbftrain.m, netlab3.3/rbfunpak.m, netlab3.3/rosegrad.m,
+ netlab3.3/rosen.m, netlab3.3/scg.m, netlab3.3/som.m,
+ netlab3.3/somfwd.m, netlab3.3/sompak.m, netlab3.3/somtrain.m,
+ netlab3.3/somunpak.m, netlab3.3/xor.dat, netlabKPM/README.txt,
+ netlabKPM/demgmm1_movie.m, netlabKPM/evidence_weighted.m,
+ netlabKPM/glmerr_weighted.m, netlabKPM/glmgrad_weighted.m,
+ netlabKPM/glmhess_weighted.m, netlabKPM/glmtrain_weighted.m,
+ netlabKPM/gmm1.avi, netlabKPM/gmmem2.m,
+ netlabKPM/gmmem_multi_restart.m, netlabKPM/kmeans_demo.m,
+ netlabKPM/mlperr_weighted.m, netlabKPM/mlpgrad_weighted.m,
+ netlabKPM/mlphdotv_weighted.m, netlabKPM/mlphess_weighted.m,
+ netlabKPM/neterr_weighted.m, netlabKPM/netgrad_weighted.m,
+ netlabKPM/nethess_weighted.m, netlabKPM/netopt_weighted.m,
+ netlabKPM/process_options.m: Initial revision
+
+2005-04-25 19:29 yozhik
+
+ * KPMstats/KLgauss.m, KPMstats/README.txt, KPMstats/beta_sample.m,
+ KPMstats/chisquared_histo.m, KPMstats/chisquared_prob.m,
+ KPMstats/chisquared_readme.txt, KPMstats/chisquared_table.m,
+ KPMstats/clg_Mstep.m, KPMstats/clg_Mstep_simple.m,
+ KPMstats/clg_prob.m, KPMstats/condGaussToJoint.m,
+ KPMstats/cond_indep_fisher_z.m,
+ KPMstats/condgaussTrainObserved.m, KPMstats/condgauss_sample.m,
+ KPMstats/convertBinaryLabels.m, KPMstats/cwr_demo.m,
+ KPMstats/cwr_em.m, KPMstats/cwr_predict.m, KPMstats/cwr_prob.m,
+ KPMstats/cwr_readme.txt, KPMstats/cwr_test.m,
+ KPMstats/dirichlet_sample.m, KPMstats/distchck.m,
+ KPMstats/eigdec.m, KPMstats/est_transmat.m,
+ KPMstats/fit_paritioned_model_testfn.m,
+ KPMstats/fit_partitioned_model.m, KPMstats/gamma_sample.m,
+ KPMstats/gaussian_prob.m, KPMstats/gaussian_sample.m,
+ KPMstats/linear_regression.m, KPMstats/logist2.m,
+ KPMstats/logist2Apply.m, KPMstats/logist2ApplyRegularized.m,
+ KPMstats/logist2Fit.m, KPMstats/logist2FitRegularized.m,
+ KPMstats/logistK.m, KPMstats/logistK_eval.m,
+ KPMstats/marginalize_gaussian.m, KPMstats/matrix_T_pdf.m,
+ KPMstats/matrix_normal_pdf.m, KPMstats/mc_stat_distrib.m,
+ KPMstats/mixgauss_Mstep.m, KPMstats/mixgauss_classifier_apply.m,
+ KPMstats/mixgauss_classifier_train.m, KPMstats/mixgauss_em.m,
+ KPMstats/mixgauss_init.m, KPMstats/mixgauss_prob.m,
+ KPMstats/mixgauss_prob_test.m, KPMstats/mixgauss_sample.m,
+ KPMstats/mkPolyFvec.m, KPMstats/mk_unit_norm.m,
+ KPMstats/multinomial_prob.m, KPMstats/multinomial_sample.m,
+ KPMstats/normal_coef.m, KPMstats/partial_corr_coef.m,
+ KPMstats/parzen.m, KPMstats/parzenC.c, KPMstats/parzenC.dll,
+ KPMstats/parzenC.mexglx, KPMstats/parzenC_test.m,
+ KPMstats/parzen_fit_select_unif.m, KPMstats/pca.m,
+ KPMstats/rndcheck.m, KPMstats/sample.m,
+ KPMstats/sample_discrete.m, KPMstats/sample_gaussian.m,
+ KPMstats/student_t_logprob.m, KPMstats/student_t_prob.m,
+ KPMstats/unif_discrete_sample.m, KPMstats/weightedRegression.m,
+ KPMtools/README.txt, KPMtools/approx_unique.m,
+ KPMtools/approxeq.m, KPMtools/argmax.m, KPMtools/argmin.m,
+ KPMtools/assert.m, KPMtools/assignEdgeNums.m,
+ KPMtools/assign_cols.m, KPMtools/axis_pct.m, KPMtools/block.m,
+ KPMtools/cell2num.m, KPMtools/chi2inv.m, KPMtools/choose.m,
+ KPMtools/collapse_mog.m, KPMtools/colmult.c,
+ KPMtools/colmult.mexglx, KPMtools/computeROC.m,
+ KPMtools/compute_counts.m, KPMtools/conf2mahal.m,
+ KPMtools/cross_entropy.m, KPMtools/div.m, KPMtools/draw_circle.m,
+ KPMtools/draw_ellipse.m, KPMtools/draw_ellipse_axes.m,
+ KPMtools/em_converged.m, KPMtools/entropy.m,
+ KPMtools/exportfig.m, KPMtools/extend_domain_table.m,
+ KPMtools/factorial.m, KPMtools/find_equiv_posns.m,
+ KPMtools/hash_add.m, KPMtools/hash_del.m, KPMtools/hash_lookup.m,
+ KPMtools/hungarian.m, KPMtools/image_rgb.m,
+ KPMtools/imresizeAspect.m, KPMtools/ind2subv.c,
+ KPMtools/ind2subv.m, KPMtools/installC_KPMtools.m,
+ KPMtools/is_psd.m, KPMtools/is_stochastic.m,
+ KPMtools/isemptycell.m, KPMtools/isposdef.m, KPMtools/isscalar.m,
+ KPMtools/isvector.m, KPMtools/junk.c, KPMtools/loadcell.m,
+ KPMtools/logb.m, KPMtools/logdet.m, KPMtools/logsum.m,
+ KPMtools/logsum_simple.m, KPMtools/logsum_test.m,
+ KPMtools/logsumexp.m, KPMtools/logsumexpv.m,
+ KPMtools/marg_table.m, KPMtools/marginalize_table.m,
+ KPMtools/matprint.m, KPMtools/max_mult.c, KPMtools/max_mult.m,
+ KPMtools/mexutil.c, KPMtools/mexutil.h,
+ KPMtools/mk_multi_index.m, KPMtools/mk_stochastic.m,
+ KPMtools/mult_by_table.m, KPMtools/myintersect.m,
+ KPMtools/myismember.m, KPMtools/myones.m, KPMtools/myplot.m,
+ KPMtools/myrand.m, KPMtools/myrepmat.m, KPMtools/myreshape.m,
+ KPMtools/mysetdiff.m, KPMtools/mysize.m, KPMtools/mysubset.m,
+ KPMtools/mysymsetdiff.m, KPMtools/bipartiteMatchingHungarian.m,
+ KPMtools/myunion.m, KPMtools/nchoose2.m, KPMtools/ncols.m,
+ KPMtools/nonmaxsup.m, KPMtools/normalise.m,
+ KPMtools/normaliseC.c, KPMtools/normaliseC.dll,
+ KPMtools/normalize.m, KPMtools/nrows.m, KPMtools/num2strcell.m,
+ KPMtools/partitionData.m, KPMtools/partition_matrix_vec.m,
+ KPMtools/pca_netlab.m, KPMtools/pick.m, KPMtools/plotROC.m,
+ KPMtools/plotROCkpm.m, KPMtools/plot_axis_thru_origin.m,
+ KPMtools/plot_ellipse.m, KPMtools/plot_matrix.m,
+ KPMtools/plot_polygon.m, KPMtools/plotcov2.m,
+ KPMtools/plotcov3.m, KPMtools/plotgauss1d.m,
+ KPMtools/plotgauss2d.m, KPMtools/plotgauss2d_old.m,
+ KPMtools/polygon_area.m, KPMtools/polygon_centroid.m,
+ KPMtools/polygon_intersect.m, KPMtools/previewfig.m,
+ KPMtools/process_options.m, KPMtools/rand_psd.m,
+ KPMtools/rectintC.m, KPMtools/rectintLoopC.mexglx,
+ KPMtools/rectintSparse.m, KPMtools/rectintSparseC.m,
+ KPMtools/rectintSparseLoopC.c, KPMtools/rectintSparseLoopC.dll,
+ KPMtools/repmatC.c, KPMtools/repmatC.dll,
+ KPMtools/repmatC.mexglx, KPMtools/rgb2grayKPM.m,
+ KPMtools/rnd_partition.m, KPMtools/rotate_xlabel.m,
+ KPMtools/safeStr.m, KPMtools/sampleUniformInts.m,
+ KPMtools/sample_discrete.m, KPMtools/set_xtick_label.m,
+ KPMtools/set_xtick_label_demo.m, KPMtools/setdiag.m,
+ KPMtools/softeye.m, KPMtools/sort_evec.m,
+ KPMtools/splitLongSeqIntoManyShort.m, KPMtools/sprintf_intvec.m,
+ KPMtools/sqdist.m, KPMtools/strmatch_multi.m,
+ KPMtools/strmatch_substr.m, KPMtools/subplot2.m,
+ KPMtools/subplot3.m, KPMtools/subsets.m, KPMtools/subv2ind.c,
+ KPMtools/subv2ind.m, KPMtools/sumv.m, KPMtools/suptitle.m,
+ KPMtools/unaryEncoding.m, KPMtools/wrap.m,
+ KPMtools/xticklabel_rotate90.m, KPMtools/zipload.m,
+ KPMtools/zipsave.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-04-25 19:29 yozhik
+
+ * KPMstats/KLgauss.m, KPMstats/README.txt, KPMstats/beta_sample.m,
+ KPMstats/chisquared_histo.m, KPMstats/chisquared_prob.m,
+ KPMstats/chisquared_readme.txt, KPMstats/chisquared_table.m,
+ KPMstats/clg_Mstep.m, KPMstats/clg_Mstep_simple.m,
+ KPMstats/clg_prob.m, KPMstats/condGaussToJoint.m,
+ KPMstats/cond_indep_fisher_z.m,
+ KPMstats/condgaussTrainObserved.m, KPMstats/condgauss_sample.m,
+ KPMstats/convertBinaryLabels.m, KPMstats/cwr_demo.m,
+ KPMstats/cwr_em.m, KPMstats/cwr_predict.m, KPMstats/cwr_prob.m,
+ KPMstats/cwr_readme.txt, KPMstats/cwr_test.m,
+ KPMstats/dirichlet_sample.m, KPMstats/distchck.m,
+ KPMstats/eigdec.m, KPMstats/est_transmat.m,
+ KPMstats/fit_paritioned_model_testfn.m,
+ KPMstats/fit_partitioned_model.m, KPMstats/gamma_sample.m,
+ KPMstats/gaussian_prob.m, KPMstats/gaussian_sample.m,
+ KPMstats/linear_regression.m, KPMstats/logist2.m,
+ KPMstats/logist2Apply.m, KPMstats/logist2ApplyRegularized.m,
+ KPMstats/logist2Fit.m, KPMstats/logist2FitRegularized.m,
+ KPMstats/logistK.m, KPMstats/logistK_eval.m,
+ KPMstats/marginalize_gaussian.m, KPMstats/matrix_T_pdf.m,
+ KPMstats/matrix_normal_pdf.m, KPMstats/mc_stat_distrib.m,
+ KPMstats/mixgauss_Mstep.m, KPMstats/mixgauss_classifier_apply.m,
+ KPMstats/mixgauss_classifier_train.m, KPMstats/mixgauss_em.m,
+ KPMstats/mixgauss_init.m, KPMstats/mixgauss_prob.m,
+ KPMstats/mixgauss_prob_test.m, KPMstats/mixgauss_sample.m,
+ KPMstats/mkPolyFvec.m, KPMstats/mk_unit_norm.m,
+ KPMstats/multinomial_prob.m, KPMstats/multinomial_sample.m,
+ KPMstats/normal_coef.m, KPMstats/partial_corr_coef.m,
+ KPMstats/parzen.m, KPMstats/parzenC.c, KPMstats/parzenC.dll,
+ KPMstats/parzenC.mexglx, KPMstats/parzenC_test.m,
+ KPMstats/parzen_fit_select_unif.m, KPMstats/pca.m,
+ KPMstats/rndcheck.m, KPMstats/sample.m,
+ KPMstats/sample_discrete.m, KPMstats/sample_gaussian.m,
+ KPMstats/student_t_logprob.m, KPMstats/student_t_prob.m,
+ KPMstats/unif_discrete_sample.m, KPMstats/weightedRegression.m,
+ KPMtools/README.txt, KPMtools/approx_unique.m,
+ KPMtools/approxeq.m, KPMtools/argmax.m, KPMtools/argmin.m,
+ KPMtools/assert.m, KPMtools/assignEdgeNums.m,
+ KPMtools/assign_cols.m, KPMtools/axis_pct.m, KPMtools/block.m,
+ KPMtools/cell2num.m, KPMtools/chi2inv.m, KPMtools/choose.m,
+ KPMtools/collapse_mog.m, KPMtools/colmult.c,
+ KPMtools/colmult.mexglx, KPMtools/computeROC.m,
+ KPMtools/compute_counts.m, KPMtools/conf2mahal.m,
+ KPMtools/cross_entropy.m, KPMtools/div.m, KPMtools/draw_circle.m,
+ KPMtools/draw_ellipse.m, KPMtools/draw_ellipse_axes.m,
+ KPMtools/em_converged.m, KPMtools/entropy.m,
+ KPMtools/exportfig.m, KPMtools/extend_domain_table.m,
+ KPMtools/factorial.m, KPMtools/find_equiv_posns.m,
+ KPMtools/hash_add.m, KPMtools/hash_del.m, KPMtools/hash_lookup.m,
+ KPMtools/hungarian.m, KPMtools/image_rgb.m,
+ KPMtools/imresizeAspect.m, KPMtools/ind2subv.c,
+ KPMtools/ind2subv.m, KPMtools/installC_KPMtools.m,
+ KPMtools/is_psd.m, KPMtools/is_stochastic.m,
+ KPMtools/isemptycell.m, KPMtools/isposdef.m, KPMtools/isscalar.m,
+ KPMtools/isvector.m, KPMtools/junk.c, KPMtools/loadcell.m,
+ KPMtools/logb.m, KPMtools/logdet.m, KPMtools/logsum.m,
+ KPMtools/logsum_simple.m, KPMtools/logsum_test.m,
+ KPMtools/logsumexp.m, KPMtools/logsumexpv.m,
+ KPMtools/marg_table.m, KPMtools/marginalize_table.m,
+ KPMtools/matprint.m, KPMtools/max_mult.c, KPMtools/max_mult.m,
+ KPMtools/mexutil.c, KPMtools/mexutil.h,
+ KPMtools/mk_multi_index.m, KPMtools/mk_stochastic.m,
+ KPMtools/mult_by_table.m, KPMtools/myintersect.m,
+ KPMtools/myismember.m, KPMtools/myones.m, KPMtools/myplot.m,
+ KPMtools/myrand.m, KPMtools/myrepmat.m, KPMtools/myreshape.m,
+ KPMtools/mysetdiff.m, KPMtools/mysize.m, KPMtools/mysubset.m,
+ KPMtools/mysymsetdiff.m, KPMtools/bipartiteMatchingHungarian.m,
+ KPMtools/myunion.m, KPMtools/nchoose2.m, KPMtools/ncols.m,
+ KPMtools/nonmaxsup.m, KPMtools/normalise.m,
+ KPMtools/normaliseC.c, KPMtools/normaliseC.dll,
+ KPMtools/normalize.m, KPMtools/nrows.m, KPMtools/num2strcell.m,
+ KPMtools/partitionData.m, KPMtools/partition_matrix_vec.m,
+ KPMtools/pca_netlab.m, KPMtools/pick.m, KPMtools/plotROC.m,
+ KPMtools/plotROCkpm.m, KPMtools/plot_axis_thru_origin.m,
+ KPMtools/plot_ellipse.m, KPMtools/plot_matrix.m,
+ KPMtools/plot_polygon.m, KPMtools/plotcov2.m,
+ KPMtools/plotcov3.m, KPMtools/plotgauss1d.m,
+ KPMtools/plotgauss2d.m, KPMtools/plotgauss2d_old.m,
+ KPMtools/polygon_area.m, KPMtools/polygon_centroid.m,
+ KPMtools/polygon_intersect.m, KPMtools/previewfig.m,
+ KPMtools/process_options.m, KPMtools/rand_psd.m,
+ KPMtools/rectintC.m, KPMtools/rectintLoopC.mexglx,
+ KPMtools/rectintSparse.m, KPMtools/rectintSparseC.m,
+ KPMtools/rectintSparseLoopC.c, KPMtools/rectintSparseLoopC.dll,
+ KPMtools/repmatC.c, KPMtools/repmatC.dll,
+ KPMtools/repmatC.mexglx, KPMtools/rgb2grayKPM.m,
+ KPMtools/rnd_partition.m, KPMtools/rotate_xlabel.m,
+ KPMtools/safeStr.m, KPMtools/sampleUniformInts.m,
+ KPMtools/sample_discrete.m, KPMtools/set_xtick_label.m,
+ KPMtools/set_xtick_label_demo.m, KPMtools/setdiag.m,
+ KPMtools/softeye.m, KPMtools/sort_evec.m,
+ KPMtools/splitLongSeqIntoManyShort.m, KPMtools/sprintf_intvec.m,
+ KPMtools/sqdist.m, KPMtools/strmatch_multi.m,
+ KPMtools/strmatch_substr.m, KPMtools/subplot2.m,
+ KPMtools/subplot3.m, KPMtools/subsets.m, KPMtools/subv2ind.c,
+ KPMtools/subv2ind.m, KPMtools/sumv.m, KPMtools/suptitle.m,
+ KPMtools/unaryEncoding.m, KPMtools/wrap.m,
+ KPMtools/xticklabel_rotate90.m, KPMtools/zipload.m,
+ KPMtools/zipsave.m: Initial revision
+
+2005-04-03 18:39 yozhik
+
+ * BNT/learning/score_dags.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-04-03 18:39 yozhik
+
+ * BNT/learning/score_dags.m: Initial revision
+
+2005-03-31 11:20 yozhik
+
+ * BNT/installC_BNT.m: Initial import of code base from Kevin
+ Murphy.
+
+2005-03-31 11:20 yozhik
+
+ * BNT/installC_BNT.m: Initial revision
+
+2005-03-30 11:59 yozhik
+
+ * KPMtools/asdemo.html: Initial import of code base from Kevin
+ Murphy.
+
+2005-03-30 11:59 yozhik
+
+ * KPMtools/asdemo.html: Initial revision
+
+2005-03-26 18:51 yozhik
+
+ * KPMtools/asdemo.m: Initial import of code base from Kevin Murphy.
+
+2005-03-26 18:51 yozhik
+
+ * KPMtools/asdemo.m: Initial revision
+
+2005-01-15 18:27 yozhik
+
+ * BNT/CPDs/@tabular_CPD/: get_field.m, set_fields.m, tabular_CPD.m:
+ Initial import of code base from Kevin Murphy.
+
+2005-01-15 18:27 yozhik
+
+ * BNT/CPDs/@tabular_CPD/: get_field.m, set_fields.m, tabular_CPD.m:
+ Initial revision
+
+2004-11-24 12:12 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/junk: Initial import of
+ code base from Kevin Murphy.
+
+2004-11-24 12:12 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/junk: Initial revision
+
+2004-11-22 14:41 yozhik
+
+ * BNT/examples/dynamic/orig_water1.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-11-22 14:41 yozhik
+
+ * BNT/examples/dynamic/orig_water1.m: Initial revision
+
+2004-11-22 14:15 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/cbk_inf_engine.m: Initial
+ import of code base from Kevin Murphy.
+
+2004-11-22 14:15 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/cbk_inf_engine.m: Initial
+ revision
+
+2004-11-06 13:52 yozhik
+
+ * BNT/examples/static/StructLearn/model_select2.m: Initial import
+ of code base from Kevin Murphy.
+
+2004-11-06 13:52 yozhik
+
+ * BNT/examples/static/StructLearn/model_select2.m: Initial revision
+
+2004-11-06 12:55 yozhik
+
+ * BNT/examples/static/StructLearn/model_select1.m: Initial import
+ of code base from Kevin Murphy.
+
+2004-11-06 12:55 yozhik
+
+ * BNT/examples/static/StructLearn/model_select1.m: Initial revision
+
+2004-10-22 18:18 yozhik
+
+ * HMM/viterbi_path.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-10-22 18:18 yozhik
+
+ * HMM/viterbi_path.m: Initial revision
+
+2004-09-29 20:09 yozhik
+
+ * BNT/inference/static/@var_elim_inf_engine/marginal_nodes.m:
+ Initial import of code base from Kevin Murphy.
+
+2004-09-29 20:09 yozhik
+
+ * BNT/inference/static/@var_elim_inf_engine/marginal_nodes.m:
+ Initial revision
+
+2004-09-12 20:21 yozhik
+
+ * BNT/examples/limids/amnio.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-09-12 20:21 yozhik
+
+ * BNT/examples/limids/amnio.m: Initial revision
+
+2004-09-12 19:27 yozhik
+
+ * BNT/examples/limids/oil1.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-09-12 19:27 yozhik
+
+ * BNT/examples/limids/oil1.m: Initial revision
+
+2004-09-12 14:01 yozhik
+
+ * BNT/examples/static/sprinkler1.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-09-12 14:01 yozhik
+
+ * BNT/examples/static/sprinkler1.m: Initial revision
+
+2004-08-29 05:41 yozhik
+
+ * HMM/transmat_train_observed.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-08-29 05:41 yozhik
+
+ * HMM/transmat_train_observed.m: Initial revision
+
+2004-08-05 08:25 yozhik
+
+ * BNT/potentials/: @dpot/divide_by_pot.m, Tables/divide_by_table.m:
+ Initial import of code base from Kevin Murphy.
+
+2004-08-05 08:25 yozhik
+
+ * BNT/potentials/: @dpot/divide_by_pot.m, Tables/divide_by_table.m:
+ Initial revision
+
+2004-08-04 12:59 yozhik
+
+ * BNT/potentials/@dpot/: marginalize_pot.m, multiply_by_pot.m:
+ Initial import of code base from Kevin Murphy.
+
+2004-08-04 12:59 yozhik
+
+ * BNT/potentials/@dpot/: marginalize_pot.m, multiply_by_pot.m:
+ Initial revision
+
+2004-08-04 12:36 yozhik
+
+ * BNT/@assocarray/subsref.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-08-04 12:36 yozhik
+
+ * BNT/@assocarray/subsref.m: Initial revision
+
+2004-08-04 08:54 yozhik
+
+ * BNT/potentials/@dpot/normalize_pot.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-08-04 08:54 yozhik
+
+ * BNT/potentials/@dpot/normalize_pot.m: Initial revision
+
+2004-08-04 08:51 yozhik
+
+ * BNT/potentials/Tables/: marg_table.m, mult_by_table.m,
+ extend_domain_table.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-08-04 08:51 yozhik
+
+ * BNT/potentials/Tables/: marg_table.m, mult_by_table.m,
+ extend_domain_table.m: Initial revision
+
+2004-08-02 15:23 yozhik
+
+ * BNT/CPDs/@noisyor_CPD/CPD_to_CPT.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-08-02 15:23 yozhik
+
+ * BNT/CPDs/@noisyor_CPD/CPD_to_CPT.m: Initial revision
+
+2004-08-02 15:05 yozhik
+
+ * BNT/general/noisyORtoTable.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-08-02 15:05 yozhik
+
+ * BNT/general/noisyORtoTable.m: Initial revision
+
+2004-06-29 10:46 yozhik
+
+ * BNT/learning/learn_struct_pdag_pc.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-06-29 10:46 yozhik
+
+ * BNT/learning/learn_struct_pdag_pc.m: Initial revision
+
+2004-06-15 10:50 yozhik
+
+ * GraphViz/graph_to_dot.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-06-15 10:50 yozhik
+
+ * GraphViz/graph_to_dot.m: Initial revision
+
+2004-06-11 14:16 yozhik
+
+ * BNT/CPDs/@tabular_CPD/log_marg_prob_node.m: Initial import of
+ code base from Kevin Murphy.
+
+2004-06-11 14:16 yozhik
+
+ * BNT/CPDs/@tabular_CPD/log_marg_prob_node.m: Initial revision
+
+2004-06-09 18:56 yozhik
+
+ * BNT/README.txt: Initial import of code base from Kevin Murphy.
+
+2004-06-09 18:56 yozhik
+
+ * BNT/README.txt: Initial revision
+
+2004-06-09 18:53 yozhik
+
+ * BNT/CPDs/@generic_CPD/learn_params.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-06-09 18:53 yozhik
+
+ * BNT/CPDs/@generic_CPD/learn_params.m: Initial revision
+
+2004-06-09 18:42 yozhik
+
+ * BNT/examples/static/nodeorderExample.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-06-09 18:42 yozhik
+
+ * BNT/examples/static/nodeorderExample.m: Initial revision
+
+2004-06-09 18:33 yozhik
+
+ * BNT/: learning/score_family.m, test_BNT.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-06-09 18:33 yozhik
+
+ * BNT/: learning/score_family.m, test_BNT.m: Initial revision
+
+2004-06-09 18:28 yozhik
+
+ * BNT/: learning/learn_params.m, CPDs/@gaussian_CPD/learn_params.m,
+ examples/static/gaussian2.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-06-09 18:28 yozhik
+
+ * BNT/: learning/learn_params.m, CPDs/@gaussian_CPD/learn_params.m,
+ examples/static/gaussian2.m: Initial revision
+
+2004-06-09 18:25 yozhik
+
+ * BNT/CPDs/@tabular_CPD/learn_params.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-06-09 18:25 yozhik
+
+ * BNT/CPDs/@tabular_CPD/learn_params.m: Initial revision
+
+2004-06-09 18:17 yozhik
+
+ * BNT/general/sample_bnet.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-06-09 18:17 yozhik
+
+ * BNT/general/sample_bnet.m: Initial revision
+
+2004-06-07 12:45 yozhik
+
+ * BNT/examples/static/discrete1.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-06-07 12:45 yozhik
+
+ * BNT/examples/static/discrete1.m: Initial revision
+
+2004-06-07 12:04 yozhik
+
+ * BNT/: inference/static/@global_joint_inf_engine/marginal_nodes.m,
+ inference/static/@global_joint_inf_engine/enter_evidence.m,
+ examples/dynamic/mk_bat_dbn.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-06-07 12:04 yozhik
+
+ * BNT/: inference/static/@global_joint_inf_engine/marginal_nodes.m,
+ inference/static/@global_joint_inf_engine/enter_evidence.m,
+ examples/dynamic/mk_bat_dbn.m: Initial revision
+
+2004-06-07 08:53 yozhik
+
+ * BNT/examples/limids/asia_dt1.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-06-07 08:53 yozhik
+
+ * BNT/examples/limids/asia_dt1.m: Initial revision
+
+2004-06-07 08:48 yozhik
+
+ * BNT/general/: solve_limid.m, compute_joint_pot.m: Initial import
+ of code base from Kevin Murphy.
+
+2004-06-07 08:48 yozhik
+
+ * BNT/general/: solve_limid.m, compute_joint_pot.m: Initial
+ revision
+
+2004-06-07 07:39 yozhik
+
+ * Kalman/README.txt: Initial import of code base from Kevin Murphy.
+
+2004-06-07 07:39 yozhik
+
+ * Kalman/README.txt: Initial revision
+
+2004-06-07 07:33 yozhik
+
+ * GraphViz/README.txt: Initial import of code base from Kevin
+ Murphy.
+
+2004-06-07 07:33 yozhik
+
+ * GraphViz/README.txt: Initial revision
+
+2004-05-31 15:19 yozhik
+
+ * HMM/dhmm_sample.m: Initial import of code base from Kevin Murphy.
+
+2004-05-31 15:19 yozhik
+
+ * HMM/dhmm_sample.m: Initial revision
+
+2004-05-25 17:32 yozhik
+
+ * HMM/mhmm_sample.m: Initial import of code base from Kevin Murphy.
+
+2004-05-25 17:32 yozhik
+
+ * HMM/mhmm_sample.m: Initial revision
+
+2004-05-24 15:26 yozhik
+
+ * HMM/mc_sample.m: Initial import of code base from Kevin Murphy.
+
+2004-05-24 15:26 yozhik
+
+ * HMM/mc_sample.m: Initial revision
+
+2004-05-18 07:50 yozhik
+
+ * BNT/installC_graph.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-05-18 07:50 yozhik
+
+ * BNT/installC_graph.m: Initial revision
+
+2004-05-13 18:13 yozhik
+
+ * BNT/inference/static/@gaussian_inf_engine/gaussian_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2004-05-13 18:13 yozhik
+
+ * BNT/inference/static/@gaussian_inf_engine/gaussian_inf_engine.m:
+ Initial revision
+
+2004-05-11 12:23 yozhik
+
+ * BNT/examples/dynamic/mk_chmm.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-05-11 12:23 yozhik
+
+ * BNT/examples/dynamic/mk_chmm.m: Initial revision
+
+2004-05-11 11:45 yozhik
+
+ * BNT/examples/dynamic/mk_water_dbn.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-05-11 11:45 yozhik
+
+ * BNT/examples/dynamic/mk_water_dbn.m: Initial revision
+
+2004-05-05 06:32 yozhik
+
+ * GraphViz/draw_dot.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-05-05 06:32 yozhik
+
+ * GraphViz/draw_dot.m: Initial revision
+
+2004-03-30 09:18 yozhik
+
+ * BNT/: general/mk_named_CPT.m,
+ CPDs/@softmax_CPD/convert_to_table.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-03-30 09:18 yozhik
+
+ * BNT/: general/mk_named_CPT.m,
+ CPDs/@softmax_CPD/convert_to_table.m: Initial revision
+
+2004-03-22 14:32 yozhik
+
+ * GraphViz/draw_graph.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-03-22 14:32 yozhik
+
+ * GraphViz/draw_graph.m: Initial revision
+
+2004-03-12 15:21 yozhik
+
+ * GraphViz/dot_to_graph.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-03-12 15:21 yozhik
+
+ * GraphViz/dot_to_graph.m: Initial revision
+
+2004-03-04 14:34 yozhik
+
+ * BNT/examples/static/burglary.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-03-04 14:34 yozhik
+
+ * BNT/examples/static/burglary.m: Initial revision
+
+2004-03-04 14:27 yozhik
+
+ * BNT/examples/static/burglar-alarm-net.lisp.txt: Initial import of
+ code base from Kevin Murphy.
+
+2004-03-04 14:27 yozhik
+
+ * BNT/examples/static/burglar-alarm-net.lisp.txt: Initial revision
+
+2004-02-28 09:25 yozhik
+
+ * BNT/examples/static/learn1.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-02-28 09:25 yozhik
+
+ * BNT/examples/static/learn1.m: Initial revision
+
+2004-02-22 11:43 yozhik
+
+ * BNT/examples/static/brainy.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-02-22 11:43 yozhik
+
+ * BNT/examples/static/brainy.m: Initial revision
+
+2004-02-20 14:00 yozhik
+
+ * BNT/CPDs/@discrete_CPD/convert_to_pot.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-02-20 14:00 yozhik
+
+ * BNT/CPDs/@discrete_CPD/convert_to_pot.m: Initial revision
+
+2004-02-18 17:12 yozhik
+
+ *
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_soft_evidence.m:
+ Initial import of code base from Kevin Murphy.
+
+2004-02-18 17:12 yozhik
+
+ *
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_soft_evidence.m:
+ Initial revision
+
+2004-02-13 18:06 yozhik
+
+ * HMM/mhmmParzen_train_observed.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-02-13 18:06 yozhik
+
+ * HMM/mhmmParzen_train_observed.m: Initial revision
+
+2004-02-12 15:08 yozhik
+
+ * HMM/gausshmm_train_observed.m: Initial import of code base from
+ Kevin Murphy.
+
+2004-02-12 15:08 yozhik
+
+ * HMM/gausshmm_train_observed.m: Initial revision
+
+2004-02-12 04:57 yozhik
+
+ * BNT/examples/static/HME/hmemenu.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-02-12 04:57 yozhik
+
+ * BNT/examples/static/HME/hmemenu.m: Initial revision
+
+2004-02-07 20:52 yozhik
+
+ * HMM/mhmm_em.m: Initial import of code base from Kevin Murphy.
+
+2004-02-07 20:52 yozhik
+
+ * HMM/mhmm_em.m: Initial revision
+
+2004-02-04 15:53 yozhik
+
+ * BNT/examples/dynamic/mk_orig_bat_dbn.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-02-04 15:53 yozhik
+
+ * BNT/examples/dynamic/mk_orig_bat_dbn.m: Initial revision
+
+2004-02-03 23:42 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/enter_soft_evidence.m:
+ Initial import of code base from Kevin Murphy.
+
+2004-02-03 23:42 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/enter_soft_evidence.m:
+ Initial revision
+
+2004-02-03 09:15 yozhik
+
+ * GraphViz/Old/graphToDot.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-02-03 09:15 yozhik
+
+ * GraphViz/Old/graphToDot.m: Initial revision
+
+2004-01-30 18:57 yozhik
+
+ * BNT/examples/dynamic/mk_orig_water_dbn.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-01-30 18:57 yozhik
+
+ * BNT/examples/dynamic/mk_orig_water_dbn.m: Initial revision
+
+2004-01-27 13:08 yozhik
+
+ * GraphViz/: my_call.m, editGraphGUI.m: Initial import of code base
+ from Kevin Murphy.
+
+2004-01-27 13:08 yozhik
+
+ * GraphViz/: my_call.m, editGraphGUI.m: Initial revision
+
+2004-01-27 13:01 yozhik
+
+ * GraphViz/Old/: dot_to_graph.m, draw_graph.m: Initial import of
+ code base from Kevin Murphy.
+
+2004-01-27 13:01 yozhik
+
+ * GraphViz/Old/: dot_to_graph.m, draw_graph.m: Initial revision
+
+2004-01-27 12:47 yozhik
+
+ * GraphViz/Old/pre_pesha_graph_to_dot.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-01-27 12:47 yozhik
+
+ * GraphViz/Old/pre_pesha_graph_to_dot.m: Initial revision
+
+2004-01-27 12:42 yozhik
+
+ * GraphViz/Old/draw_dot.m: Initial import of code base from Kevin
+ Murphy.
+
+2004-01-27 12:42 yozhik
+
+ * GraphViz/Old/draw_dot.m: Initial revision
+
+2004-01-14 17:06 yozhik
+
+ * BNT/examples/static/Models/mk_hmm_bnet.m: Initial import of code
+ base from Kevin Murphy.
+
+2004-01-14 17:06 yozhik
+
+ * BNT/examples/static/Models/mk_hmm_bnet.m: Initial revision
+
+2004-01-12 12:53 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/enter_evidence.m: Initial
+ import of code base from Kevin Murphy.
+
+2004-01-12 12:53 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/enter_evidence.m: Initial
+ revision
+
+2004-01-04 17:23 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/bp_mrf2.m: Initial
+ import of code base from Kevin Murphy.
+
+2004-01-04 17:23 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/bp_mrf2.m: Initial
+ revision
+
+2003-12-15 22:17 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/marginal_nodes.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-12-15 22:17 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/marginal_nodes.m: Initial
+ revision
+
+2003-10-31 14:37 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/jtree_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-10-31 14:37 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/jtree_inf_engine.m:
+ Initial revision
+
+2003-09-05 07:06 yozhik
+
+ * BNT/learning/learn_struct_mcmc.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-09-05 07:06 yozhik
+
+ * BNT/learning/learn_struct_mcmc.m: Initial revision
+
+2003-08-18 14:50 yozhik
+
+ * BNT/learning/learn_params_dbn_em.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-08-18 14:50 yozhik
+
+ * BNT/learning/learn_params_dbn_em.m: Initial revision
+
+2003-07-30 06:37 yozhik
+
+ * BNT/potentials/: @mpot/set_domain_pot.m,
+ @cgpot/Old/set_domain_pot.m, @cgpot/set_domain_pot.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-07-30 06:37 yozhik
+
+ * BNT/potentials/: @mpot/set_domain_pot.m,
+ @cgpot/Old/set_domain_pot.m, @cgpot/set_domain_pot.m: Initial
+ revision
+
+2003-07-28 19:44 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/: dbn_init_bel.m,
+ dbn_marginal_from_bel.m, dbn_update_bel.m, dbn_update_bel1.m,
+ marginal_family.m, update_engine.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-07-28 19:44 yozhik
+
+ * BNT/inference/dynamic/@cbk_inf_engine/: dbn_init_bel.m,
+ dbn_marginal_from_bel.m, dbn_update_bel.m, dbn_update_bel1.m,
+ marginal_family.m, update_engine.m: Initial revision
+
+2003-07-28 15:44 yozhik
+
+ * GraphViz/: approxeq.m, process_options.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-07-28 15:44 yozhik
+
+ * GraphViz/: approxeq.m, process_options.m: Initial revision
+
+2003-07-24 06:41 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/update_ess.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-07-24 06:41 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/update_ess.m: Initial revision
+
+2003-07-22 15:55 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/update_ess.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-07-22 15:55 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/update_ess.m: Initial revision
+
+2003-07-06 13:57 yozhik
+
+ * BNT/inference/static/@pearl_inf_engine/bethe_free_energy.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-07-06 13:57 yozhik
+
+ * BNT/inference/static/@pearl_inf_engine/bethe_free_energy.m:
+ Initial revision
+
+2003-05-21 06:49 yozhik
+
+ * BNT/potentials/@scgpot/: complement_pot.m, normalize_pot.m,
+ recursive_combine_pots.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-05-21 06:49 yozhik
+
+ * BNT/potentials/@scgpot/: complement_pot.m, normalize_pot.m,
+ recursive_combine_pots.m: Initial revision
+
+2003-05-20 07:10 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/maximize_params.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-05-20 07:10 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/maximize_params.m: Initial revision
+
+2003-05-13 09:11 yozhik
+
+ * HMM/mhmm_em_demo.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-05-13 09:11 yozhik
+
+ * HMM/mhmm_em_demo.m: Initial revision
+
+2003-05-13 07:35 yozhik
+
+ * BNT/examples/dynamic/viterbi1.m: Initial import of code base from
+ Kevin Murphy.
+
+2003-05-13 07:35 yozhik
+
+ * BNT/examples/dynamic/viterbi1.m: Initial revision
+
+2003-05-11 16:31 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/convert_to_table.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-05-11 16:31 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/convert_to_table.m: Initial revision
+
+2003-05-11 16:13 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-05-11 16:13 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m: Initial
+ revision
+
+2003-05-11 08:39 yozhik
+
+ * BNT/inference/static/@stab_cond_gauss_inf_engine/README: Initial
+ import of code base from Kevin Murphy.
+
+2003-05-11 08:39 yozhik
+
+ * BNT/inference/static/@stab_cond_gauss_inf_engine/README: Initial
+ revision
+
+2003-05-04 15:31 yozhik
+
+ * BNT/uninstallC_BNT.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-05-04 15:31 yozhik
+
+ * BNT/uninstallC_BNT.m: Initial revision
+
+2003-05-04 15:23 yozhik
+
+ * BNT/examples/dynamic/: dhmm1.m, ghmm1.m, mhmm1.m: Initial import
+ of code base from Kevin Murphy.
+
+2003-05-04 15:23 yozhik
+
+ * BNT/examples/dynamic/: dhmm1.m, ghmm1.m, mhmm1.m: Initial
+ revision
+
+2003-05-04 15:11 yozhik
+
+ * HMM/mhmm_logprob.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-05-04 15:11 yozhik
+
+ * HMM/mhmm_logprob.m: Initial revision
+
+2003-05-04 15:01 yozhik
+
+ * HMM/: dhmm_logprob.m, dhmm_em_online.m, dhmm_em_online_demo.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-05-04 15:01 yozhik
+
+ * HMM/: dhmm_logprob.m, dhmm_em_online.m, dhmm_em_online_demo.m:
+ Initial revision
+
+2003-05-04 14:58 yozhik
+
+ * HMM/: pomdp_sample.m, dhmm_sample_endstate.m, dhmm_em_demo.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-05-04 14:58 yozhik
+
+ * HMM/: pomdp_sample.m, dhmm_sample_endstate.m, dhmm_em_demo.m:
+ Initial revision
+
+2003-05-04 14:47 yozhik
+
+ *
+ BNT/inference/online/@hmm_2TBN_inf_engine/private/mk_hmm_obs_lik_vec.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-05-04 14:47 yozhik
+
+ *
+ BNT/inference/online/@hmm_2TBN_inf_engine/private/mk_hmm_obs_lik_vec.m:
+ Initial revision
+
+2003-05-04 14:42 yozhik
+
+ *
+ BNT/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_matrix.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-05-04 14:42 yozhik
+
+ *
+ BNT/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_matrix.m:
+ Initial revision
+
+2003-04-22 14:00 yozhik
+
+ * BNT/CPDs/@tabular_CPD/display.m: Initial import of code base from
+ Kevin Murphy.
+
+2003-04-22 14:00 yozhik
+
+ * BNT/CPDs/@tabular_CPD/display.m: Initial revision
+
+2003-03-28 09:22 yozhik
+
+ * BNT/examples/dynamic/ho1.m: Initial import of code base from
+ Kevin Murphy.
+
+2003-03-28 09:22 yozhik
+
+ * BNT/examples/dynamic/ho1.m: Initial revision
+
+2003-03-28 09:12 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-03-28 09:12 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m:
+ Initial revision
+
+2003-03-28 08:35 yozhik
+
+ * GraphViz/arrow.m: Initial import of code base from Kevin Murphy.
+
+2003-03-28 08:35 yozhik
+
+ * GraphViz/arrow.m: Initial revision
+
+2003-03-25 16:06 yozhik
+
+ * BNT/examples/static/Models/mk_asia_bnet.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-03-25 16:06 yozhik
+
+ * BNT/examples/static/Models/mk_asia_bnet.m: Initial revision
+
+2003-03-20 07:07 yozhik
+
+ * BNT/potentials/@scgpot/README: Initial import of code base from
+ Kevin Murphy.
+
+2003-03-20 07:07 yozhik
+
+ * BNT/potentials/@scgpot/README: Initial revision
+
+2003-03-14 01:45 yozhik
+
+ *
+ BNT/inference/dynamic/@stable_ho_inf_engine/stable_ho_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-03-14 01:45 yozhik
+
+ *
+ BNT/inference/dynamic/@stable_ho_inf_engine/stable_ho_inf_engine.m:
+ Initial revision
+
+2003-03-12 02:38 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/enter_evidence.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-03-12 02:38 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/enter_evidence.m:
+ Initial revision
+
+2003-03-11 10:07 yozhik
+
+ * BNT/potentials/@scgpot/reduce_pot.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-03-11 10:07 yozhik
+
+ * BNT/potentials/@scgpot/reduce_pot.m: Initial revision
+
+2003-03-11 09:49 yozhik
+
+ * BNT/potentials/@scgpot/combine_pots.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-03-11 09:49 yozhik
+
+ * BNT/potentials/@scgpot/combine_pots.m: Initial revision
+
+2003-03-11 09:37 yozhik
+
+ * BNT/potentials/@scgcpot/reduce_pot.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-03-11 09:37 yozhik
+
+ * BNT/potentials/@scgcpot/reduce_pot.m: Initial revision
+
+2003-03-11 09:06 yozhik
+
+ * BNT/potentials/@scgpot/marginalize_pot.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-03-11 09:06 yozhik
+
+ * BNT/potentials/@scgpot/marginalize_pot.m: Initial revision
+
+2003-03-11 06:04 yozhik
+
+ * BNT/potentials/@scgpot/scgpot.m: Initial import of code base from
+ Kevin Murphy.
+
+2003-03-11 06:04 yozhik
+
+ * BNT/potentials/@scgpot/scgpot.m: Initial revision
+
+2003-03-09 15:03 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/convert_to_pot.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-03-09 15:03 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/convert_to_pot.m: Initial revision
+
+2003-03-09 14:44 yozhik
+
+ * BNT/CPDs/@tabular_CPD/maximize_params.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-03-09 14:44 yozhik
+
+ * BNT/CPDs/@tabular_CPD/maximize_params.m: Initial revision
+
+2003-02-21 03:20 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-02-21 03:20 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m:
+ Initial revision
+
+2003-02-21 03:13 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-02-21 03:13 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m:
+ Initial revision
+
+2003-02-19 01:52 yozhik
+
+ * BNT/inference/dynamic/@stable_ho_inf_engine/: enter_evidence.m,
+ marginal_family.m, marginal_nodes.m, test_ho_inf_enginge.m,
+ update_engine.m: Initial import of code base from Kevin Murphy.
+
+2003-02-19 01:52 yozhik
+
+ * BNT/inference/dynamic/@stable_ho_inf_engine/: enter_evidence.m,
+ marginal_family.m, marginal_nodes.m, test_ho_inf_enginge.m,
+ update_engine.m: Initial revision
+
+2003-02-10 07:38 yozhik
+
+ * BNT/inference/static/@stab_cond_gauss_inf_engine/push.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-02-10 07:38 yozhik
+
+ * BNT/inference/static/@stab_cond_gauss_inf_engine/push.m: Initial
+ revision
+
+2003-02-06 18:25 yozhik
+
+ * KPMtools/checkpsd.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-02-06 18:25 yozhik
+
+ * KPMtools/checkpsd.m: Initial revision
+
+2003-02-05 19:16 yozhik
+
+ * GraphViz/draw_hmm.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-02-05 19:16 yozhik
+
+ * GraphViz/draw_hmm.m: Initial revision
+
+2003-02-01 16:23 yozhik
+
+ * BNT/: general/dbn_to_hmm.m, learning/learn_params_dbn.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-02-01 16:23 yozhik
+
+ * BNT/: general/dbn_to_hmm.m, learning/learn_params_dbn.m: Initial
+ revision
+
+2003-02-01 11:42 yozhik
+
+ * BNT/general/mk_dbn.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-02-01 11:42 yozhik
+
+ * BNT/general/mk_dbn.m: Initial revision
+
+2003-01-30 16:13 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/maximize_params_debug.m: Initial import of
+ code base from Kevin Murphy.
+
+2003-01-30 16:13 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/maximize_params_debug.m: Initial revision
+
+2003-01-30 14:38 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/Old/maximize_params.m: Initial import of
+ code base from Kevin Murphy.
+
+2003-01-30 14:38 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/Old/maximize_params.m: Initial revision
+
+2003-01-29 03:23 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_singleclq_nodes.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-01-29 03:23 yozhik
+
+ *
+ BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_singleclq_nodes.m:
+ Initial revision
+
+2003-01-24 11:36 yozhik
+
+ * Kalman/sample_lds.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-24 11:36 yozhik
+
+ * Kalman/sample_lds.m: Initial revision
+
+2003-01-24 04:52 yozhik
+
+ * BNT/potentials/@scgpot/extension_pot.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-01-24 04:52 yozhik
+
+ * BNT/potentials/@scgpot/extension_pot.m: Initial revision
+
+2003-01-23 10:49 yozhik
+
+ * BNT/: general/convert_dbn_CPDs_to_tables1.m,
+ inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_vec.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-01-23 10:49 yozhik
+
+ * BNT/: general/convert_dbn_CPDs_to_tables1.m,
+ inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_vec.m:
+ Initial revision
+
+2003-01-23 10:44 yozhik
+
+ * BNT/general/convert_dbn_CPDs_to_tables.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-01-23 10:44 yozhik
+
+ * BNT/general/convert_dbn_CPDs_to_tables.m: Initial revision
+
+2003-01-22 13:38 yozhik
+
+ * BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-01-22 13:38 yozhik
+
+ * BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Initial
+ revision
+
+2003-01-22 12:32 yozhik
+
+ * HMM/mc_sample_endstate.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-22 12:32 yozhik
+
+ * HMM/mc_sample_endstate.m: Initial revision
+
+2003-01-22 09:56 yozhik
+
+ * HMM/fixed_lag_smoother.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-22 09:56 yozhik
+
+ * HMM/fixed_lag_smoother.m: Initial revision
+
+2003-01-20 08:56 yozhik
+
+ * GraphViz/draw_graph_test.m: Initial import of code base from
+ Kevin Murphy.
+
+2003-01-20 08:56 yozhik
+
+ * GraphViz/draw_graph_test.m: Initial revision
+
+2003-01-18 15:10 yozhik
+
+ * BNT/general/dsep_test.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-18 15:10 yozhik
+
+ * BNT/general/dsep_test.m: Initial revision
+
+2003-01-18 15:00 yozhik
+
+ * BNT/copyright.txt: Initial import of code base from Kevin Murphy.
+
+2003-01-18 15:00 yozhik
+
+ * BNT/copyright.txt: Initial revision
+
+2003-01-18 14:49 yozhik
+
+ * Kalman/tracking_demo.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-18 14:49 yozhik
+
+ * Kalman/tracking_demo.m: Initial revision
+
+2003-01-18 14:22 yozhik
+
+ * BNT/: examples/dummy, inference/dummy, inference/dynamic/dummy,
+ inference/online/dummy, inference/static/dummy: Initial import of
+ code base from Kevin Murphy.
+
+2003-01-18 14:22 yozhik
+
+ * BNT/: examples/dummy, inference/dummy, inference/dynamic/dummy,
+ inference/online/dummy, inference/static/dummy: Initial revision
+
+2003-01-18 14:16 yozhik
+
+ * BNT/examples/dynamic/: ehmm1.m, jtree_clq_test.m: Initial import
+ of code base from Kevin Murphy.
+
+2003-01-18 14:16 yozhik
+
+ * BNT/examples/dynamic/: ehmm1.m, jtree_clq_test.m: Initial
+ revision
+
+2003-01-18 14:11 yozhik
+
+ * BNT/inference/static/:
+ @jtree_sparse_inf_engine/jtree_sparse_inf_engine.m,
+ @jtree_mnet_inf_engine/jtree_mnet_inf_engine.m: Initial import of
+ code base from Kevin Murphy.
+
+2003-01-18 14:11 yozhik
+
+ * BNT/inference/static/:
+ @jtree_sparse_inf_engine/jtree_sparse_inf_engine.m,
+ @jtree_mnet_inf_engine/jtree_mnet_inf_engine.m: Initial revision
+
+2003-01-18 13:17 yozhik
+
+ * GraphViz/draw_dbn_test.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-18 13:17 yozhik
+
+ * GraphViz/draw_dbn_test.m: Initial revision
+
+2003-01-11 10:53 yozhik
+
+ * BNT/inference/static/@pearl_inf_engine/pearl_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-01-11 10:53 yozhik
+
+ * BNT/inference/static/@pearl_inf_engine/pearl_inf_engine.m:
+ Initial revision
+
+2003-01-11 10:48 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/learn_map.m: Initial import of code
+ base from Kevin Murphy.
+
+2003-01-11 10:48 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/learn_map.m: Initial revision
+
+2003-01-11 10:41 yozhik
+
+ * BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_evidence.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-01-11 10:41 yozhik
+
+ * BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_evidence.m:
+ Initial revision
+
+2003-01-11 10:13 yozhik
+
+ * BNT/inference/dynamic/@bk_inf_engine/enter_soft_evidence.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-01-11 10:13 yozhik
+
+ * BNT/inference/dynamic/@bk_inf_engine/enter_soft_evidence.m:
+ Initial revision
+
+2003-01-07 08:25 yozhik
+
+ * BNT/CPDs/@softmax_CPD/softmax_CPD.m: Initial import of code base
+ from Kevin Murphy.
+
+2003-01-07 08:25 yozhik
+
+ * BNT/CPDs/@softmax_CPD/softmax_CPD.m: Initial revision
+
+2003-01-03 14:01 yozhik
+
+ *
+ BNT/inference/static/@belprop_mrf2_inf_engine/belprop_mrf2_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2003-01-03 14:01 yozhik
+
+ *
+ BNT/inference/static/@belprop_mrf2_inf_engine/belprop_mrf2_inf_engine.m:
+ Initial revision
+
+2003-01-02 09:49 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/find_mpe.m: Initial
+ import of code base from Kevin Murphy.
+
+2003-01-02 09:49 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/find_mpe.m: Initial
+ revision
+
+2003-01-02 09:28 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/: set_params.m,
+ enter_soft_evidence.m: Initial import of code base from Kevin
+ Murphy.
+
+2003-01-02 09:28 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/: set_params.m,
+ enter_soft_evidence.m: Initial revision
+
+2002-12-31 14:06 yozhik
+
+ * BNT/general/mk_mrf2.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-12-31 14:06 yozhik
+
+ * BNT/general/mk_mrf2.m: Initial revision
+
+2002-12-31 13:24 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/marginal_nodes.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-12-31 13:24 yozhik
+
+ * BNT/inference/static/@belprop_mrf2_inf_engine/marginal_nodes.m:
+ Initial revision
+
+2002-12-31 11:00 yozhik
+
+ * BNT/inference/static/@belprop_inf_engine/belprop_inf_engine.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-12-31 11:00 yozhik
+
+ * BNT/inference/static/@belprop_inf_engine/belprop_inf_engine.m:
+ Initial revision
+
+2002-12-16 11:16 yozhik
+
+ * BNT/examples/dynamic/HHMM/remove_hhmm_end_state.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-12-16 11:16 yozhik
+
+ * BNT/examples/dynamic/HHMM/remove_hhmm_end_state.m: Initial
+ revision
+
+2002-12-16 09:57 yozhik
+
+ * BNT/general/unroll_set.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-12-16 09:57 yozhik
+
+ * BNT/general/unroll_set.m: Initial revision
+
+2002-11-26 14:14 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/mgram3.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-11-26 14:14 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/mgram3.m: Initial revision
+
+2002-11-26 14:04 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/mgram2.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-11-26 14:04 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/mgram2.m: Initial revision
+
+2002-11-22 16:44 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/Old/mgram2.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-11-22 16:44 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/Old/mgram2.m: Initial revision
+
+2002-11-22 15:59 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/mgram1.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-11-22 15:59 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/mgram1.m: Initial revision
+
+2002-11-22 15:51 yozhik
+
+ * BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_nodes.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-11-22 15:51 yozhik
+
+ * BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_nodes.m:
+ Initial revision
+
+2002-11-22 15:07 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/: num2letter.m, letter2num.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-11-22 15:07 yozhik
+
+ * BNT/examples/dynamic/HHMM/Mgram/: num2letter.m, letter2num.m:
+ Initial revision
+
+2002-11-22 14:35 yozhik
+
+ * BNT/general/convert_dbn_CPDs_to_pots.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-11-22 14:35 yozhik
+
+ * BNT/general/convert_dbn_CPDs_to_pots.m: Initial revision
+
+2002-11-22 13:45 yozhik
+
+ * HMM/mk_rightleft_transmat.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-11-22 13:45 yozhik
+
+ * HMM/mk_rightleft_transmat.m: Initial revision
+
+2002-11-14 12:33 yozhik
+
+ * BNT/examples/dynamic/water2.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-11-14 12:33 yozhik
+
+ * BNT/examples/dynamic/water2.m: Initial revision
+
+2002-11-14 12:07 yozhik
+
+ * BNT/examples/dynamic/water1.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-11-14 12:07 yozhik
+
+ * BNT/examples/dynamic/water1.m: Initial revision
+
+2002-11-14 12:02 yozhik
+
+ * BNT/inference/: online/@hmm_2TBN_inf_engine/marginal_nodes.m,
+ dynamic/@hmm_inf_engine/marginal_nodes.m,
+ online/@hmm_2TBN_inf_engine/hmm_2TBN_inf_engine.m,
+ dynamic/@hmm_inf_engine/hmm_inf_engine.m,
+ dynamic/@hmm_inf_engine/marginal_family.m,
+ online/@hmm_2TBN_inf_engine/marginal_family.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-11-14 12:02 yozhik
+
+ * BNT/inference/: online/@hmm_2TBN_inf_engine/marginal_nodes.m,
+ dynamic/@hmm_inf_engine/marginal_nodes.m,
+ online/@hmm_2TBN_inf_engine/hmm_2TBN_inf_engine.m,
+ dynamic/@hmm_inf_engine/hmm_inf_engine.m,
+ dynamic/@hmm_inf_engine/marginal_family.m,
+ online/@hmm_2TBN_inf_engine/marginal_family.m: Initial revision
+
+2002-11-14 08:31 yozhik
+
+ * BNT/inference/:
+ online/@jtree_2TBN_inf_engine/jtree_2TBN_inf_engine.m,
+ dynamic/@jtree_dbn_inf_engine/jtree_dbn_inf_engine.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-11-14 08:31 yozhik
+
+ * BNT/inference/:
+ online/@jtree_2TBN_inf_engine/jtree_2TBN_inf_engine.m,
+ dynamic/@jtree_dbn_inf_engine/jtree_dbn_inf_engine.m: Initial
+ revision
+
+2002-11-13 17:01 yozhik
+
+ * BNT/examples/: static/qmr2.m, dynamic/arhmm1.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-11-13 17:01 yozhik
+
+ * BNT/examples/: static/qmr2.m, dynamic/arhmm1.m: Initial revision
+
+2002-11-03 08:44 yozhik
+
+ * BNT/examples/static/Models/mk_alarm_bnet.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-11-03 08:44 yozhik
+
+ * BNT/examples/static/Models/mk_alarm_bnet.m: Initial revision
+
+2002-11-01 16:32 yozhik
+
+ * Kalman/kalman_forward_backward.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-11-01 16:32 yozhik
+
+ * Kalman/kalman_forward_backward.m: Initial revision
+
+2002-10-23 08:17 yozhik
+
+ * Kalman/learning_demo.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-10-23 08:17 yozhik
+
+ * Kalman/learning_demo.m: Initial revision
+
+2002-10-18 13:05 yozhik
+
+ * BNT/inference/static/@pearl_inf_engine/marginal_family.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-10-18 13:05 yozhik
+
+ * BNT/inference/static/@pearl_inf_engine/marginal_family.m: Initial
+ revision
+
+2002-10-10 16:45 yozhik
+
+ * BNT/examples/dynamic/jtree_clq_test2.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-10-10 16:45 yozhik
+
+ * BNT/examples/dynamic/jtree_clq_test2.m: Initial revision
+
+2002-10-10 16:14 yozhik
+
+ * BNT/examples/dynamic/: mk_mildew_dbn.m, mk_uffe_dbn.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-10-10 16:14 yozhik
+
+ * BNT/examples/dynamic/: mk_mildew_dbn.m, mk_uffe_dbn.m: Initial
+ revision
+
+2002-10-09 13:36 yozhik
+
+ * BNT/examples/dynamic/mk_ps_from_clqs.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-10-09 13:36 yozhik
+
+ * BNT/examples/dynamic/mk_ps_from_clqs.m: Initial revision
+
+2002-10-07 06:26 yozhik
+
+ * BNT/CPDs/@deterministic_CPD/deterministic_CPD.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-10-07 06:26 yozhik
+
+ * BNT/CPDs/@deterministic_CPD/deterministic_CPD.m: Initial revision
+
+2002-10-02 08:39 yozhik
+
+ * BNT/potentials/Tables/marg_tableC.c: Initial import of code base
+ from Kevin Murphy.
+
+2002-10-02 08:39 yozhik
+
+ * BNT/potentials/Tables/marg_tableC.c: Initial revision
+
+2002-10-02 08:28 yozhik
+
+ * BNT/potentials/Tables/: mult_by_tableM.m, mult_by_table2.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-10-02 08:28 yozhik
+
+ * BNT/potentials/Tables/: mult_by_tableM.m, mult_by_table2.m:
+ Initial revision
+
+2002-10-01 14:33 yozhik
+
+ * BNT/potentials/Tables/mult_by_tableC.c: Initial import of code
+ base from Kevin Murphy.
+
+2002-10-01 14:33 yozhik
+
+ * BNT/potentials/Tables/mult_by_tableC.c: Initial revision
+
+2002-10-01 14:23 yozhik
+
+ * BNT/potentials/Tables/mult_by_table.c: Initial import of code
+ base from Kevin Murphy.
+
+2002-10-01 14:23 yozhik
+
+ * BNT/potentials/Tables/mult_by_table.c: Initial revision
+
+2002-10-01 14:20 yozhik
+
+ * BNT/potentials/Tables/repmat_and_mult.c: Initial import of code
+ base from Kevin Murphy.
+
+2002-10-01 14:20 yozhik
+
+ * BNT/potentials/Tables/repmat_and_mult.c: Initial revision
+
+2002-10-01 12:04 yozhik
+
+ * BNT/potentials/@dpot/dpot.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-10-01 12:04 yozhik
+
+ * BNT/potentials/@dpot/dpot.m: Initial revision
+
+2002-10-01 11:21 yozhik
+
+ * BNT/examples/static/Belprop/belprop_polytree_discrete.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-10-01 11:21 yozhik
+
+ * BNT/examples/static/Belprop/belprop_polytree_discrete.m: Initial
+ revision
+
+2002-10-01 11:16 yozhik
+
+ * BNT/examples/static/cmp_inference_static.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-10-01 11:16 yozhik
+
+ * BNT/examples/static/cmp_inference_static.m: Initial revision
+
+2002-10-01 10:39 yozhik
+
+ * BNT/potentials/Tables/marg_tableM.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-10-01 10:39 yozhik
+
+ * BNT/potentials/Tables/marg_tableM.m: Initial revision
+
+2002-09-29 03:21 yozhik
+
+ * BNT/potentials/Tables/mult_by_table_global.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-09-29 03:21 yozhik
+
+ * BNT/potentials/Tables/mult_by_table_global.m: Initial revision
+
+2002-09-26 01:39 yozhik
+
+ * BNT/learning/learn_struct_K2.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-09-26 01:39 yozhik
+
+ * BNT/learning/learn_struct_K2.m: Initial revision
+
+2002-09-24 15:43 yozhik
+
+ * BNT/: CPDs/@hhmm2Q_CPD/update_ess.m,
+ CPDs/@hhmm2Q_CPD/maximize_params.m,
+ examples/dynamic/HHMM/Map/disp_map_hhmm.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-09-24 15:43 yozhik
+
+ * BNT/: CPDs/@hhmm2Q_CPD/update_ess.m,
+ CPDs/@hhmm2Q_CPD/maximize_params.m,
+ examples/dynamic/HHMM/Map/disp_map_hhmm.m: Initial revision
+
+2002-09-24 15:34 yozhik
+
+ * BNT/CPDs/@hhmm2Q_CPD/: hhmm2Q_CPD.m, reset_ess.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-09-24 15:34 yozhik
+
+ * BNT/CPDs/@hhmm2Q_CPD/: hhmm2Q_CPD.m, reset_ess.m: Initial
+ revision
+
+2002-09-24 15:13 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/mk_rnd_map_hhmm.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-09-24 15:13 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/mk_rnd_map_hhmm.m: Initial revision
+
+2002-09-24 06:10 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/maximize_params.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-09-24 06:10 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/maximize_params.m: Initial revision
+
+2002-09-24 06:02 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/sample_from_map.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-09-24 06:02 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/sample_from_map.m: Initial revision
+
+2002-09-24 05:46 yozhik
+
+ * BNT/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-09-24 05:46 yozhik
+
+ * BNT/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m: Initial revision
+
+2002-09-24 03:49 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/mk_map_hhmm.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-09-24 03:49 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/mk_map_hhmm.m: Initial revision
+
+2002-09-24 00:02 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/Old/mk_map_hhmm.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-09-24 00:02 yozhik
+
+ * BNT/examples/dynamic/HHMM/Map/Old/mk_map_hhmm.m: Initial revision
+
+2002-09-23 21:19 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/hhmmQ_CPD.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-09-23 21:19 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/hhmmQ_CPD.m: Initial revision
+
+2002-09-23 19:58 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/update_CPT.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-09-23 19:58 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/update_CPT.m: Initial revision
+
+2002-09-23 19:30 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_CPT.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-09-23 19:30 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_CPT.m: Initial revision
+
+2002-09-21 14:37 yozhik
+
+ * BNT/examples/dynamic/HHMM/abcd_hhmm.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-09-21 14:37 yozhik
+
+ * BNT/examples/dynamic/HHMM/abcd_hhmm.m: Initial revision
+
+2002-09-21 13:58 yozhik
+
+ * BNT/examples/dynamic/HHMM/mk_hhmm.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-09-21 13:58 yozhik
+
+ * BNT/examples/dynamic/HHMM/mk_hhmm.m: Initial revision
+
+2002-09-10 10:44 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/log_prob_node.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-09-10 10:44 yozhik
+
+ * BNT/CPDs/@gaussian_CPD/log_prob_node.m: Initial revision
+
+2002-07-28 16:09 yozhik
+
+ * BNT/learning/: learn_struct_pdag_pc_constrain.m, CovMat.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-07-28 16:09 yozhik
+
+ * BNT/learning/: learn_struct_pdag_pc_constrain.m, CovMat.m:
+ Initial revision
+
+2002-07-24 07:48 yozhik
+
+ * BNT/general/hodbn_to_bnet.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-07-24 07:48 yozhik
+
+ * BNT/general/hodbn_to_bnet.m: Initial revision
+
+2002-07-23 06:17 yozhik
+
+ * BNT/general/mk_higher_order_dbn.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-07-23 06:17 yozhik
+
+ * BNT/general/mk_higher_order_dbn.m: Initial revision
+
+2002-07-20 18:25 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/set_fields.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-07-20 18:25 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/set_fields.m: Initial
+ revision
+
+2002-07-20 17:32 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/back_mpe.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-07-20 17:32 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/back_mpe.m: Initial
+ revision
+
+2002-07-02 15:56 yozhik
+
+ * BNT/examples/dynamic/HHMM/Motif/learn_motif_hhmm.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-07-02 15:56 yozhik
+
+ * BNT/examples/dynamic/HHMM/Motif/learn_motif_hhmm.m: Initial
+ revision
+
+2002-06-27 13:34 yozhik
+
+ * BNT/general/add_ev_to_dmarginal.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-06-27 13:34 yozhik
+
+ * BNT/general/add_ev_to_dmarginal.m: Initial revision
+
+2002-06-24 16:54 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/update_ess.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-06-24 16:54 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/update_ess.m: Initial revision
+
+2002-06-24 16:38 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/hhmmF_CPD.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-06-24 16:38 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/hhmmF_CPD.m: Initial revision
+
+2002-06-24 15:45 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/update_CPT.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-06-24 15:45 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/update_CPT.m: Initial revision
+
+2002-06-24 15:35 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/Old/: hhmmF_CPD.m, log_prior.m,
+ maximize_params.m, reset_ess.m, update_CPT.m, update_ess.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-06-24 15:35 yozhik
+
+ * BNT/CPDs/@hhmmF_CPD/Old/: hhmmF_CPD.m, log_prior.m,
+ maximize_params.m, reset_ess.m, update_CPT.m, update_ess.m:
+ Initial revision
+
+2002-06-24 15:23 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_ess4.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-06-24 15:23 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_ess4.m: Initial revision
+
+2002-06-24 15:08 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_ess3.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-06-24 15:08 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_ess3.m: Initial revision
+
+2002-06-24 14:20 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_ess2.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-06-24 14:20 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/update_ess2.m: Initial revision
+
+2002-06-24 11:56 yozhik
+
+ * BNT/: general/mk_fgraph_given_ev.m,
+ CPDs/mk_isolated_tabular_CPD.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-06-24 11:56 yozhik
+
+ * BNT/: general/mk_fgraph_given_ev.m,
+ CPDs/mk_isolated_tabular_CPD.m: Initial revision
+
+2002-06-24 11:19 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/: hhmmQ_CPD.m, log_prior.m,
+ maximize_params.m, reset_ess.m, update_ess.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-06-24 11:19 yozhik
+
+ * BNT/CPDs/@hhmmQ_CPD/Old/: hhmmQ_CPD.m, log_prior.m,
+ maximize_params.m, reset_ess.m, update_ess.m: Initial revision
+
+2002-06-20 13:30 yozhik
+
+ * BNT/examples/dynamic/mildew1.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-06-20 13:30 yozhik
+
+ * BNT/examples/dynamic/mildew1.m: Initial revision
+
+2002-06-19 17:18 yozhik
+
+ * BNT/: inference/dynamic/@hmm_inf_engine/find_mpe.m,
+ examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-06-19 17:18 yozhik
+
+ * BNT/: inference/dynamic/@hmm_inf_engine/find_mpe.m,
+ examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m: Initial
+ revision
+
+2002-06-19 17:03 yozhik
+
+ * BNT/examples/static/fgraph/fg1.m: Initial import of code base
+ from Kevin Murphy.
+
+2002-06-19 17:03 yozhik
+
+ * BNT/examples/static/fgraph/fg1.m: Initial revision
+
+2002-06-19 16:59 yozhik
+
+ * BNT/: examples/static/softev1.m,
+ inference/static/@belprop_fg_inf_engine/find_mpe.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-06-19 16:59 yozhik
+
+ * BNT/: examples/static/softev1.m,
+ inference/static/@belprop_fg_inf_engine/find_mpe.m: Initial
+ revision
+
+2002-06-19 15:11 yozhik
+
+ * BNT/inference/static/@var_elim_inf_engine/find_mpe.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-06-19 15:11 yozhik
+
+ * BNT/inference/static/@var_elim_inf_engine/find_mpe.m: Initial
+ revision
+
+2002-06-19 15:08 yozhik
+
+ * BNT/: inference/static/@belprop_inf_engine/find_mpe.m,
+ examples/static/mpe1.m, examples/static/mpe2.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-06-19 15:08 yozhik
+
+ * BNT/: inference/static/@belprop_inf_engine/find_mpe.m,
+ examples/static/mpe1.m, examples/static/mpe2.m: Initial revision
+
+2002-06-19 15:04 yozhik
+
+ * BNT/inference/static/@var_elim_inf_engine/:
+ var_elim_inf_engine.m, enter_evidence.m: Initial import of code
+ base from Kevin Murphy.
+
+2002-06-19 15:04 yozhik
+
+ * BNT/inference/static/@var_elim_inf_engine/:
+ var_elim_inf_engine.m, enter_evidence.m: Initial revision
+
+2002-06-19 14:56 yozhik
+
+ * BNT/inference/static/@global_joint_inf_engine/find_mpe.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-06-19 14:56 yozhik
+
+ * BNT/inference/static/@global_joint_inf_engine/find_mpe.m: Initial
+ revision
+
+2002-06-17 16:49 yozhik
+
+ * BNT/inference/online/: @jtree_2TBN_inf_engine/back1_mpe.m,
+ @smoother_engine/find_mpe.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-06-17 16:49 yozhik
+
+ * BNT/inference/online/: @jtree_2TBN_inf_engine/back1_mpe.m,
+ @smoother_engine/find_mpe.m: Initial revision
+
+2002-06-17 16:46 yozhik
+
+ * BNT/inference/online/: @jtree_2TBN_inf_engine/fwd.m,
+ @jtree_2TBN_inf_engine/fwd1.m, @smoother_engine/enter_evidence.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-06-17 16:46 yozhik
+
+ * BNT/inference/online/: @jtree_2TBN_inf_engine/fwd.m,
+ @jtree_2TBN_inf_engine/fwd1.m, @smoother_engine/enter_evidence.m:
+ Initial revision
+
+2002-06-17 16:38 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/backT_mpe.m: Initial
+ import of code base from Kevin Murphy.
+
+2002-06-17 16:38 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/backT_mpe.m: Initial
+ revision
+
+2002-06-17 16:34 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/: back.m, backT.m,
+ back1.m: Initial import of code base from Kevin Murphy.
+
+2002-06-17 16:34 yozhik
+
+ * BNT/inference/online/@jtree_2TBN_inf_engine/: back.m, backT.m,
+ back1.m: Initial revision
+
+2002-06-17 16:14 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/: find_mpe.m,
+ find_max_config.m: Initial import of code base from Kevin Murphy.
+
+2002-06-17 16:14 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/: find_mpe.m,
+ find_max_config.m: Initial revision
+
+2002-06-17 14:58 yozhik
+
+ * BNT/general/Old/calc_mpe.m: Initial import of code base from
+ Kevin Murphy.
+
+2002-06-17 14:58 yozhik
+
+ * BNT/general/Old/calc_mpe.m: Initial revision
+
+2002-06-17 13:59 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/: enter_evidence.m,
+ distribute_evidence.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-06-17 13:59 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/: enter_evidence.m,
+ distribute_evidence.m: Initial revision
+
+2002-06-17 13:29 yozhik
+
+ * BNT/inference/static/@jtree_mnet_inf_engine/: find_mpe.m,
+ enter_evidence.m: Initial import of code base from Kevin Murphy.
+
+2002-06-17 13:29 yozhik
+
+ * BNT/inference/static/@jtree_mnet_inf_engine/: find_mpe.m,
+ enter_evidence.m: Initial revision
+
+2002-06-16 13:01 yozhik
+
+ * BNT/general/is_mnet.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-06-16 13:01 yozhik
+
+ * BNT/general/is_mnet.m: Initial revision
+
+2002-06-16 12:52 yozhik
+
+ * BNT/general/mk_mnet.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-06-16 12:52 yozhik
+
+ * BNT/general/mk_mnet.m: Initial revision
+
+2002-06-16 12:34 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/init_pot.m: Initial import
+ of code base from Kevin Murphy.
+
+2002-06-16 12:34 yozhik
+
+ * BNT/inference/static/@jtree_inf_engine/init_pot.m: Initial
+ revision
+
+2002-06-16 12:06 yozhik
+
+ * BNT/potentials/@dpot/find_most_prob_entry.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-06-16 12:06 yozhik
+
+ * BNT/potentials/@dpot/find_most_prob_entry.m: Initial revision
+
+2002-05-31 03:25 yozhik
+
+ * BNT/general/unroll_higher_order_topology.m: Initial import of
+ code base from Kevin Murphy.
+
+2002-05-31 03:25 yozhik
+
+ * BNT/general/unroll_higher_order_topology.m: Initial revision
+
+2002-05-29 08:59 yozhik
+
+ * BNT/@assocarray/assocarray.m,
+ BNT/CPDs/@boolean_CPD/boolean_CPD.m,
+ BNT/CPDs/@discrete_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@discrete_CPD/CPD_to_pi.m,
+ BNT/CPDs/@discrete_CPD/CPD_to_scgpot.m,
+ BNT/CPDs/@discrete_CPD/README,
+ BNT/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m,
+ BNT/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m,
+ BNT/CPDs/@discrete_CPD/convert_to_sparse_table.c,
+ BNT/CPDs/@discrete_CPD/convert_to_table.m,
+ BNT/CPDs/@discrete_CPD/discrete_CPD.m,
+ BNT/CPDs/@discrete_CPD/dom_sizes.m,
+ BNT/CPDs/@discrete_CPD/log_prob_node.m,
+ BNT/CPDs/@discrete_CPD/prob_node.m,
+ BNT/CPDs/@discrete_CPD/sample_node.m,
+ BNT/CPDs/@discrete_CPD/Old/convert_to_pot.m,
+ BNT/CPDs/@discrete_CPD/Old/convert_to_table.m,
+ BNT/CPDs/@discrete_CPD/Old/prob_CPD.m,
+ BNT/CPDs/@discrete_CPD/Old/prob_node.m,
+ BNT/CPDs/@discrete_CPD/private/prod_CPT_and_pi_msgs.m,
+ BNT/CPDs/@gaussian_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@gaussian_CPD/CPD_to_pi.m,
+ BNT/CPDs/@gaussian_CPD/CPD_to_scgpot.m,
+ BNT/CPDs/@gaussian_CPD/adjustable_CPD.m,
+ BNT/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m,
+ BNT/CPDs/@gaussian_CPD/display.m,
+ BNT/CPDs/@gaussian_CPD/get_field.m,
+ BNT/CPDs/@gaussian_CPD/reset_ess.m,
+ BNT/CPDs/@gaussian_CPD/sample_node.m,
+ BNT/CPDs/@gaussian_CPD/set_fields.m,
+ BNT/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m,
+ BNT/CPDs/@gaussian_CPD/Old/gaussian_CPD.m,
+ BNT/CPDs/@gaussian_CPD/Old/log_prob_node.m,
+ BNT/CPDs/@gaussian_CPD/Old/update_ess.m,
+ BNT/CPDs/@gaussian_CPD/Old/update_tied_ess.m,
+ BNT/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m,
+ BNT/CPDs/@generic_CPD/README,
+ BNT/CPDs/@generic_CPD/adjustable_CPD.m,
+ BNT/CPDs/@generic_CPD/display.m,
+ BNT/CPDs/@generic_CPD/generic_CPD.m,
+ BNT/CPDs/@generic_CPD/log_prior.m,
+ BNT/CPDs/@generic_CPD/set_clamped.m,
+ BNT/CPDs/@generic_CPD/Old/BIC_score_CPD.m,
+ BNT/CPDs/@generic_CPD/Old/CPD_to_dpots.m,
+ BNT/CPDs/@gmux_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@gmux_CPD/convert_to_pot.m,
+ BNT/CPDs/@gmux_CPD/CPD_to_pi.m, BNT/CPDs/@gmux_CPD/display.m,
+ BNT/CPDs/@gmux_CPD/gmux_CPD.m, BNT/CPDs/@gmux_CPD/sample_node.m,
+ BNT/CPDs/@gmux_CPD/Old/gmux_CPD.m,
+ BNT/CPDs/@hhmmF_CPD/log_prior.m,
+ BNT/CPDs/@hhmmF_CPD/maximize_params.m,
+ BNT/CPDs/@hhmmF_CPD/reset_ess.m, BNT/CPDs/@hhmmQ_CPD/log_prior.m,
+ BNT/CPDs/@hhmmQ_CPD/reset_ess.m,
+ BNT/CPDs/@mlp_CPD/convert_to_table.m,
+ BNT/CPDs/@mlp_CPD/maximize_params.m, BNT/CPDs/@mlp_CPD/mlp_CPD.m,
+ BNT/CPDs/@mlp_CPD/reset_ess.m, BNT/CPDs/@mlp_CPD/update_ess.m,
+ BNT/CPDs/@noisyor_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@noisyor_CPD/CPD_to_pi.m,
+ BNT/CPDs/@noisyor_CPD/noisyor_CPD.m,
+ BNT/CPDs/@noisyor_CPD/private/sum_prod_CPD_and_pi_msgs.m,
+ BNT/CPDs/@root_CPD/CPD_to_pi.m,
+ BNT/CPDs/@root_CPD/convert_to_pot.m,
+ BNT/CPDs/@root_CPD/log_marg_prob_node.m,
+ BNT/CPDs/@root_CPD/log_prob_node.m,
+ BNT/CPDs/@root_CPD/root_CPD.m, BNT/CPDs/@root_CPD/sample_node.m,
+ BNT/CPDs/@root_CPD/Old/CPD_to_CPT.m,
+ BNT/CPDs/@softmax_CPD/convert_to_pot.m,
+ BNT/CPDs/@softmax_CPD/display.m,
+ BNT/CPDs/@softmax_CPD/get_field.m,
+ BNT/CPDs/@softmax_CPD/maximize_params.m,
+ BNT/CPDs/@softmax_CPD/reset_ess.m,
+ BNT/CPDs/@softmax_CPD/sample_node.m,
+ BNT/CPDs/@softmax_CPD/set_fields.m,
+ BNT/CPDs/@softmax_CPD/update_ess.m,
+ BNT/CPDs/@softmax_CPD/private/extract_params.m,
+ BNT/CPDs/@tabular_CPD/CPD_to_CPT.m,
+ BNT/CPDs/@tabular_CPD/bayes_update_params.m,
+ BNT/CPDs/@tabular_CPD/log_nextcase_prob_node.m,
+ BNT/CPDs/@tabular_CPD/log_prior.m,
+ BNT/CPDs/@tabular_CPD/reset_ess.m,
+ BNT/CPDs/@tabular_CPD/update_ess.m,
+ BNT/CPDs/@tabular_CPD/update_ess_simple.m,
+ BNT/CPDs/@tabular_CPD/Old/BIC_score_CPD.m,
+ BNT/CPDs/@tabular_CPD/Old/bayesian_score_CPD.m,
+ BNT/CPDs/@tabular_CPD/Old/log_marg_prob_node_case.m,
+ BNT/CPDs/@tabular_CPD/Old/mult_CPD_and_pi_msgs.m,
+ BNT/CPDs/@tabular_CPD/Old/prob_CPT.m,
+ BNT/CPDs/@tabular_CPD/Old/prob_node.m,
+ BNT/CPDs/@tabular_CPD/Old/sample_node.m,
+ BNT/CPDs/@tabular_CPD/Old/sample_node_single_case.m,
+ BNT/CPDs/@tabular_CPD/Old/tabular_CPD.m,
+ BNT/CPDs/@tabular_CPD/Old/update_params.m,
+ BNT/CPDs/@tabular_decision_node/CPD_to_CPT.m,
+ BNT/CPDs/@tabular_decision_node/display.m,
+ BNT/CPDs/@tabular_decision_node/get_field.m,
+ BNT/CPDs/@tabular_decision_node/set_fields.m,
+ BNT/CPDs/@tabular_decision_node/tabular_decision_node.m,
+ BNT/CPDs/@tabular_decision_node/Old/tabular_decision_node.m,
+ BNT/CPDs/@tabular_kernel/convert_to_pot.m,
+ BNT/CPDs/@tabular_kernel/convert_to_table.m,
+ BNT/CPDs/@tabular_kernel/get_field.m,
+ BNT/CPDs/@tabular_kernel/set_fields.m,
+ BNT/CPDs/@tabular_kernel/tabular_kernel.m,
+ BNT/CPDs/@tabular_kernel/Old/tabular_kernel.m,
+ BNT/CPDs/@tabular_utility_node/convert_to_pot.m,
+ BNT/CPDs/@tabular_utility_node/display.m,
+ BNT/CPDs/@tabular_utility_node/tabular_utility_node.m,
+ BNT/CPDs/@tree_CPD/display.m,
+ BNT/CPDs/@tree_CPD/evaluate_tree_performance.m,
+ BNT/CPDs/@tree_CPD/get_field.m,
+ BNT/CPDs/@tree_CPD/learn_params.m, BNT/CPDs/@tree_CPD/readme.txt,
+ BNT/CPDs/@tree_CPD/set_fields.m, BNT/CPDs/@tree_CPD/tree_CPD.m,
+ BNT/CPDs/Old/@linear_gaussian_CPD/linear_gaussian_CPD.m,
+ BNT/CPDs/Old/@linear_gaussian_CPD/log_marg_prob_node.m,
+ BNT/CPDs/Old/@linear_gaussian_CPD/update_params_complete.m,
+ BNT/CPDs/Old/@root_gaussian_CPD/log_marg_prob_node.m,
+ BNT/CPDs/Old/@root_gaussian_CPD/root_gaussian_CPD.m,
+ BNT/CPDs/Old/@root_gaussian_CPD/update_params_complete.m,
+ BNT/CPDs/Old/@tabular_chance_node/CPD_to_upot.m,
+ BNT/CPDs/Old/@tabular_chance_node/tabular_chance_node.m,
+ BNT/examples/dynamic/bat1.m, BNT/examples/dynamic/bkff1.m,
+ BNT/examples/dynamic/chmm1.m,
+ BNT/examples/dynamic/cmp_inference_dbn.m,
+ BNT/examples/dynamic/cmp_learning_dbn.m,
+ BNT/examples/dynamic/cmp_online_inference.m,
+ BNT/examples/dynamic/fhmm_infer.m,
+ BNT/examples/dynamic/filter_test1.m,
+ BNT/examples/dynamic/kalman1.m,
+ BNT/examples/dynamic/kjaerulff1.m,
+ BNT/examples/dynamic/loopy_dbn1.m,
+ BNT/examples/dynamic/mk_collage_from_clqs.m,
+ BNT/examples/dynamic/mk_fhmm.m, BNT/examples/dynamic/reveal1.m,
+ BNT/examples/dynamic/scg_dbn.m,
+ BNT/examples/dynamic/skf_data_assoc_gmux.m,
+ BNT/examples/dynamic/HHMM/add_hhmm_end_state.m,
+ BNT/examples/dynamic/HHMM/hhmm_jtree_clqs.m,
+ BNT/examples/dynamic/HHMM/mk_hhmm_topo.m,
+ BNT/examples/dynamic/HHMM/mk_hhmm_topo_F1.m,
+ BNT/examples/dynamic/HHMM/pretty_print_hhmm_parse.m,
+ BNT/examples/dynamic/HHMM/Motif/fixed_args_mk_motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Motif/mk_motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Motif/sample_motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Old/mk_abcd_hhmm.m,
+ BNT/examples/dynamic/HHMM/Old/mk_arrow_alpha_hhmm3.m,
+ BNT/examples/dynamic/HHMM/Old/mk_hhmm2.m,
+ BNT/examples/dynamic/HHMM/Old/mk_hhmm3.m,
+ BNT/examples/dynamic/HHMM/Old/mk_hhmm3_args.m,
+ BNT/examples/dynamic/HHMM/Old/motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Old/remove_hhmm_end_state.m,
+ BNT/examples/dynamic/HHMM/Square/get_square_data.m,
+ BNT/examples/dynamic/HHMM/Square/hhmm_inference.m,
+ BNT/examples/dynamic/HHMM/Square/is_F2_true_D3.m,
+ BNT/examples/dynamic/HHMM/Square/learn_square_hhmm_discrete.m,
+ BNT/examples/dynamic/HHMM/Square/mk_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/plot_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_cts.m,
+ BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_discrete.m,
+ BNT/examples/dynamic/HHMM/Square/square4.mat,
+ BNT/examples/dynamic/HHMM/Square/square4_cases.mat,
+ BNT/examples/dynamic/HHMM/Square/test_square_fig.m,
+ BNT/examples/dynamic/HHMM/Square/test_square_fig.mat,
+ BNT/examples/dynamic/HHMM/Square/Old/learn_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/Old/mk_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/Old/plot_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/Old/sample_square_hhmm.m,
+ BNT/examples/dynamic/Old/chmm1.m,
+ BNT/examples/dynamic/Old/cmp_inference.m,
+ BNT/examples/dynamic/Old/kalman1.m,
+ BNT/examples/dynamic/Old/old.water1.m,
+ BNT/examples/dynamic/Old/online1.m,
+ BNT/examples/dynamic/Old/online2.m,
+ BNT/examples/dynamic/Old/scg_dbn.m,
+ BNT/examples/dynamic/SLAM/mk_gmux_robot_dbn.m,
+ BNT/examples/dynamic/SLAM/mk_linear_slam.m,
+ BNT/examples/dynamic/SLAM/slam_kf.m,
+ BNT/examples/dynamic/SLAM/slam_offline_loopy.m,
+ BNT/examples/dynamic/SLAM/slam_partial_kf.m,
+ BNT/examples/dynamic/SLAM/slam_stationary_loopy.m,
+ BNT/examples/dynamic/SLAM/Old/offline_loopy_slam.m,
+ BNT/examples/dynamic/SLAM/Old/paskin1.m,
+ BNT/examples/dynamic/SLAM/Old/skf_data_assoc_gmux2.m,
+ BNT/examples/dynamic/SLAM/Old/slam_kf.m,
+ BNT/examples/limids/id1.m, BNT/examples/limids/pigs1.m,
+ BNT/examples/static/cg1.m, BNT/examples/static/cg2.m,
+ BNT/examples/static/discrete2.m, BNT/examples/static/discrete3.m,
+ BNT/examples/static/fa1.m, BNT/examples/static/gaussian1.m,
+ BNT/examples/static/gibbs_test1.m, BNT/examples/static/lw1.m,
+ BNT/examples/static/mfa1.m, BNT/examples/static/mixexp1.m,
+ BNT/examples/static/mixexp2.m, BNT/examples/static/mixexp3.m,
+ BNT/examples/static/mog1.m, BNT/examples/static/qmr1.m,
+ BNT/examples/static/sample1.m, BNT/examples/static/softmax1.m,
+ BNT/examples/static/Belprop/belprop_loop1_discrete.m,
+ BNT/examples/static/Belprop/belprop_loop1_gauss.m,
+ BNT/examples/static/Belprop/belprop_loopy_cg.m,
+ BNT/examples/static/Belprop/belprop_loopy_discrete.m,
+ BNT/examples/static/Belprop/belprop_loopy_gauss.m,
+ BNT/examples/static/Belprop/belprop_polytree_cg.m,
+ BNT/examples/static/Belprop/belprop_polytree_gauss.m,
+ BNT/examples/static/Belprop/bp1.m,
+ BNT/examples/static/Belprop/gmux1.m,
+ BNT/examples/static/Brutti/Belief_IOhmm.m,
+ BNT/examples/static/Brutti/Belief_hmdt.m,
+ BNT/examples/static/Brutti/Belief_hme.m,
+ BNT/examples/static/Brutti/Sigmoid_Belief.m,
+ BNT/examples/static/HME/HMEforMatlab.jpg,
+ BNT/examples/static/HME/README, BNT/examples/static/HME/fhme.m,
+ BNT/examples/static/HME/gen_data.m,
+ BNT/examples/static/HME/hme_class_plot.m,
+ BNT/examples/static/HME/hme_reg_plot.m,
+ BNT/examples/static/HME/hme_topobuilder.m,
+ BNT/examples/static/HME/test_data_class.mat,
+ BNT/examples/static/HME/test_data_class2.mat,
+ BNT/examples/static/HME/test_data_reg.mat,
+ BNT/examples/static/HME/train_data_class.mat,
+ BNT/examples/static/HME/train_data_reg.mat,
+ BNT/examples/static/Misc/mixexp_data.txt,
+ BNT/examples/static/Misc/mixexp_graddesc.m,
+ BNT/examples/static/Misc/mixexp_plot.m,
+ BNT/examples/static/Misc/sprinkler.bif,
+ BNT/examples/static/Models/mk_cancer_bnet.m,
+ BNT/examples/static/Models/mk_car_bnet.m,
+ BNT/examples/static/Models/mk_ideker_bnet.m,
+ BNT/examples/static/Models/mk_incinerator_bnet.m,
+ BNT/examples/static/Models/mk_markov_chain_bnet.m,
+ BNT/examples/static/Models/mk_minimal_qmr_bnet.m,
+ BNT/examples/static/Models/mk_qmr_bnet.m,
+ BNT/examples/static/Models/mk_vstruct_bnet.m,
+ BNT/examples/static/Models/Old/mk_hmm_bnet.m,
+ BNT/examples/static/SCG/scg1.m, BNT/examples/static/SCG/scg2.m,
+ BNT/examples/static/SCG/scg3.m,
+ BNT/examples/static/SCG/scg_3node.m,
+ BNT/examples/static/SCG/scg_unstable.m,
+ BNT/examples/static/StructLearn/bic1.m,
+ BNT/examples/static/StructLearn/cooper_yoo.m,
+ BNT/examples/static/StructLearn/k2demo1.m,
+ BNT/examples/static/StructLearn/mcmc1.m,
+ BNT/examples/static/StructLearn/pc1.m,
+ BNT/examples/static/StructLearn/pc2.m,
+ BNT/examples/static/Zoubin/README,
+ BNT/examples/static/Zoubin/csum.m,
+ BNT/examples/static/Zoubin/ffa.m,
+ BNT/examples/static/Zoubin/mfa.m,
+ BNT/examples/static/Zoubin/mfa_cl.m,
+ BNT/examples/static/Zoubin/mfademo.m,
+ BNT/examples/static/Zoubin/rdiv.m,
+ BNT/examples/static/Zoubin/rprod.m,
+ BNT/examples/static/Zoubin/rsum.m,
+ BNT/examples/static/dtree/test_housing.m,
+ BNT/examples/static/dtree/test_restaurants.m,
+ BNT/examples/static/dtree/test_zoo1.m,
+ BNT/examples/static/dtree/tmp.dot,
+ BNT/examples/static/dtree/transform_data_into_bnt_format.m,
+ BNT/examples/static/fgraph/fg2.m,
+ BNT/examples/static/fgraph/fg3.m,
+ BNT/examples/static/fgraph/fg_mrf1.m,
+ BNT/examples/static/fgraph/fg_mrf2.m,
+ BNT/general/bnet_to_fgraph.m,
+ BNT/general/compute_fwd_interface.m,
+ BNT/general/compute_interface_nodes.m,
+ BNT/general/compute_minimal_interface.m,
+ BNT/general/dbn_to_bnet.m,
+ BNT/general/determine_elim_constraints.m,
+ BNT/general/do_intervention.m, BNT/general/dsep.m,
+ BNT/general/enumerate_scenarios.m, BNT/general/fgraph_to_bnet.m,
+ BNT/general/log_lik_complete.m,
+ BNT/general/log_marg_lik_complete.m, BNT/general/mk_bnet.m,
+ BNT/general/mk_fgraph.m, BNT/general/mk_limid.m,
+ BNT/general/mk_mutilated_samples.m,
+ BNT/general/mk_slice_and_half_dbn.m,
+ BNT/general/partition_dbn_nodes.m,
+ BNT/general/sample_bnet_nocell.m, BNT/general/sample_dbn.m,
+ BNT/general/score_bnet_complete.m,
+ BNT/general/unroll_dbn_topology.m,
+ BNT/general/Old/bnet_to_gdl_graph.m,
+ BNT/general/Old/calc_mpe_bucket.m,
+ BNT/general/Old/calc_mpe_dbn.m,
+ BNT/general/Old/calc_mpe_given_inf_engine.m,
+ BNT/general/Old/calc_mpe_global.m,
+ BNT/general/Old/compute_interface_nodes.m,
+ BNT/general/Old/mk_gdl_graph.m, GraphViz/draw_dbn.m,
+ GraphViz/make_layout.m, BNT/license.gpl.txt,
+ BNT/general/add_evidence_to_gmarginal.m,
+ BNT/inference/@inf_engine/bnet_from_engine.m,
+ BNT/inference/@inf_engine/get_field.m,
+ BNT/inference/@inf_engine/inf_engine.m,
+ BNT/inference/@inf_engine/marginal_family.m,
+ BNT/inference/@inf_engine/set_fields.m,
+ BNT/inference/@inf_engine/update_engine.m,
+ BNT/inference/@inf_engine/Old/marginal_family_pot.m,
+ BNT/inference/@inf_engine/Old/observed_nodes.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/bk_ff_hmm_inf_engine.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_init_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_marginal_from_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_predict_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel1.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/bk_ff_fb.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/combine_marginals_into_joint.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/dbn_to_hmm.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_mat.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec1.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/project_joint_onto_marginals.m,
+ BNT/inference/dynamic/@bk_inf_engine/bk_inf_engine.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_init_bel.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_marginal_from_bel.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel1.m,
+ BNT/inference/dynamic/@bk_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@bk_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@bk_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@bk_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@ff_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/enter_soft_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/ff_inf_engine.m,
+ BNT/inference/dynamic/@ff_inf_engine/filter_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@ff_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@ff_inf_engine/smooth_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence1.m,
+ BNT/inference/dynamic/@ff_inf_engine/Old/marginal_family.m,
+ BNT/inference/dynamic/@frontier_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@frontier_inf_engine/enter_soft_evidence.m,
+ BNT/inference/dynamic/@frontier_inf_engine/frontier_inf_engine.m,
+ BNT/inference/dynamic/@frontier_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@frontier_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@frontier_inf_engine/set_fwdback.m,
+ BNT/inference/dynamic/@hmm_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@hmm_inf_engine/Old/dhmm_inf_engine.m,
+ BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_family.m,
+ BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_nodes.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence1.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence2.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence3.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence4.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/marginal_nodes.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_nonint.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_trans.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine1.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine2.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/jtree_unrolled_dbn_inf_engine.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_family.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_nodes.m,
+ BNT/inference/dynamic/@kalman_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@kalman_inf_engine/kalman_inf_engine.m,
+ BNT/inference/dynamic/@kalman_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@kalman_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@kalman_inf_engine/private/dbn_to_lds.m,
+ BNT/inference/dynamic/@kalman_inf_engine/private/extract_params_from_gbn.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_soft_ev.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/pearl_dbn_inf_engine.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/correct_smooth.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/enter_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence_obj_oriented.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence_fast.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/wrong_smooth.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/private/init_pearl_dbn_ev_msgs.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/pearl_unrolled_dbn_inf_engine.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/update_engine.m,
+ BNT/inference/online/@filter_engine/bnet_from_engine.m,
+ BNT/inference/online/@filter_engine/enter_evidence.m,
+ BNT/inference/online/@filter_engine/filter_engine.m,
+ BNT/inference/online/@filter_engine/marginal_family.m,
+ BNT/inference/online/@filter_engine/marginal_nodes.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/back.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/backT.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/fwd.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/fwd1.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/update_engine.m,
+ BNT/inference/online/@jtree_2TBN_inf_engine/marginal_family.m,
+ BNT/inference/online/@jtree_2TBN_inf_engine/marginal_nodes.m,
+ BNT/inference/online/@jtree_2TBN_inf_engine/Old/jtree_2TBN_inf_engine.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back1.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/backT.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/enter_evidence.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd1.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/jtree_sparse_2TBN_inf_engine.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_family.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_nodes.m,
+ BNT/inference/online/@smoother_engine/bnet_from_engine.m,
+ BNT/inference/online/@smoother_engine/marginal_family.m,
+ BNT/inference/online/@smoother_engine/marginal_nodes.m,
+ BNT/inference/online/@smoother_engine/smoother_engine.m,
+ BNT/inference/online/@smoother_engine/update_engine.m,
+ BNT/inference/static/@belprop_fg_inf_engine/belprop_fg_inf_engine.m,
+ BNT/inference/static/@belprop_fg_inf_engine/enter_evidence.m,
+ BNT/inference/static/@belprop_fg_inf_engine/loopy_converged.m,
+ BNT/inference/static/@belprop_fg_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@belprop_fg_inf_engine/set_params.m,
+ BNT/inference/static/@belprop_inf_engine/enter_evidence.m,
+ BNT/inference/static/@belprop_inf_engine/loopy_converged.m,
+ BNT/inference/static/@belprop_inf_engine/marginal_family.m,
+ BNT/inference/static/@belprop_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@belprop_inf_engine/Old/belprop_gdl_inf_engine.m,
+ BNT/inference/static/@belprop_inf_engine/Old/belprop_inf_engine_nostr.m,
+ BNT/inference/static/@belprop_inf_engine/Old/enter_evidence.m,
+ BNT/inference/static/@belprop_inf_engine/Old/enter_evidence1.m,
+ BNT/inference/static/@belprop_inf_engine/Old/marginal_domain.m,
+ BNT/inference/static/@belprop_inf_engine/private/junk,
+ BNT/inference/static/@belprop_inf_engine/private/parallel_protocol.m,
+ BNT/inference/static/@belprop_inf_engine/private/tree_protocol.m,
+ BNT/inference/static/@cond_gauss_inf_engine/cond_gauss_inf_engine.m,
+ BNT/inference/static/@cond_gauss_inf_engine/enter_evidence.m,
+ BNT/inference/static/@cond_gauss_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@enumerative_inf_engine/enter_evidence.m,
+ BNT/inference/static/@enumerative_inf_engine/enumerative_inf_engine.m,
+ BNT/inference/static/@enumerative_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@gaussian_inf_engine/enter_evidence.m,
+ BNT/inference/static/@gaussian_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@gaussian_inf_engine/private/extract_params_from_gbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/enter_evidence.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/gibbs_sampling_inf_engine.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/CPT.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_children.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families_dbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior.c,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior_dbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_strides.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/get_cpts.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.c,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/my_sample_discrete.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/sample_single_discrete.c,
+ BNT/inference/static/@global_joint_inf_engine/global_joint_inf_engine.m,
+ BNT/inference/static/@global_joint_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_inf_engine/cliques_from_engine.m,
+ BNT/inference/static/@jtree_inf_engine/clq_containing_nodes.m,
+ BNT/inference/static/@jtree_inf_engine/collect_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/enter_soft_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@jtree_inf_engine/set_fields.m,
+ BNT/inference/static/@jtree_inf_engine/Old/collect_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/Old/distribute_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/Old/enter_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/Old/enter_soft_evidence.m,
+ BNT/inference/static/@jtree_limid_inf_engine/enter_evidence.m,
+ BNT/inference/static/@jtree_limid_inf_engine/jtree_limid_inf_engine.m,
+ BNT/inference/static/@jtree_limid_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_limid_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_family.m,
+ BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_nodes_SS.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/cliques_from_engine.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/clq_containing_nodes.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/collect_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/distribute_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/enter_evidence.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/enter_soft_evidence.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/init_pot.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/set_fields.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/collect_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/distribute_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.m,
+ BNT/inference/static/@likelihood_weighting_inf_engine/enter_evidence.m,
+ BNT/inference/static/@likelihood_weighting_inf_engine/likelihood_weighting_inf_engine.m,
+ BNT/inference/static/@likelihood_weighting_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@pearl_inf_engine/enter_evidence.m,
+ BNT/inference/static/@pearl_inf_engine/loopy_converged.m,
+ BNT/inference/static/@pearl_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@pearl_inf_engine/private/compute_bel.m,
+ BNT/inference/static/@pearl_inf_engine/private/prod_lambda_msgs.m,
+ BNT/inference/static/@pearl_inf_engine/private/tree_protocol.m,
+ BNT/inference/static/@quickscore_inf_engine/enter_evidence.m,
+ BNT/inference/static/@quickscore_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@quickscore_inf_engine/quickscore_inf_engine.m,
+ BNT/inference/static/@quickscore_inf_engine/private/C_quickscore.c,
+ BNT/inference/static/@quickscore_inf_engine/private/nr.h,
+ BNT/inference/static/@quickscore_inf_engine/private/nrutil.c,
+ BNT/inference/static/@quickscore_inf_engine/private/nrutil.h,
+ BNT/inference/static/@quickscore_inf_engine/private/quickscore.m,
+ BNT/learning/bayes_update_params.m,
+ BNT/learning/bic_score_family.m,
+ BNT/learning/compute_cooling_schedule.m,
+ BNT/learning/dirichlet_score_family.m,
+ BNT/learning/kpm_learn_struct_mcmc.m,
+ BNT/learning/learn_params_em.m,
+ BNT/learning/learn_struct_dbn_reveal.m,
+ BNT/learning/learn_struct_pdag_ic_star.m,
+ BNT/learning/mcmc_sample_to_hist.m, BNT/learning/mk_schedule.m,
+ BNT/learning/mk_tetrad_data_file.m,
+ BNT/learning/score_dags_old.m, HMM/dhmm_logprob_brute_force.m,
+ HMM/dhmm_logprob_path.m, HMM/mdp_sample.m, Kalman/AR_to_SS.m,
+ Kalman/SS_to_AR.m, Kalman/convert_to_lagged_form.m,
+ Kalman/ensure_AR.m, Kalman/eval_AR_perf.m,
+ Kalman/kalman_filter.m, Kalman/kalman_smoother.m,
+ Kalman/kalman_update.m, Kalman/learn_AR.m,
+ Kalman/learn_AR_diagonal.m, Kalman/learn_kalman.m,
+ Kalman/smooth_update.m,
+ BNT/general/convert_dbn_CPDs_to_tables_slow.m,
+ BNT/general/dispcpt.m, BNT/general/linear_gaussian_to_cpot.m,
+ BNT/general/partition_matrix_vec_3.m,
+ BNT/general/shrink_obs_dims_in_gaussian.m,
+ BNT/general/shrink_obs_dims_in_table.m,
+ BNT/potentials/CPD_to_pot.m, BNT/potentials/README,
+ BNT/potentials/check_for_cd_arcs.m,
+ BNT/potentials/determine_pot_type.m,
+ BNT/potentials/mk_initial_pot.m,
+ BNT/potentials/@cgpot/cg_can_to_mom.m,
+ BNT/potentials/@cgpot/cg_mom_to_can.m,
+ BNT/potentials/@cgpot/cgpot.m, BNT/potentials/@cgpot/display.m,
+ BNT/potentials/@cgpot/divide_by_pot.m,
+ BNT/potentials/@cgpot/domain_pot.m,
+ BNT/potentials/@cgpot/enter_cts_evidence_pot.m,
+ BNT/potentials/@cgpot/enter_discrete_evidence_pot.m,
+ BNT/potentials/@cgpot/marginalize_pot.m,
+ BNT/potentials/@cgpot/multiply_by_pot.m,
+ BNT/potentials/@cgpot/multiply_pots.m,
+ BNT/potentials/@cgpot/normalize_pot.m,
+ BNT/potentials/@cgpot/pot_to_marginal.m,
+ BNT/potentials/@cgpot/Old/normalize_pot.m,
+ BNT/potentials/@cgpot/Old/simple_marginalize_pot.m,
+ BNT/potentials/@cpot/cpot.m, BNT/potentials/@cpot/cpot_to_mpot.m,
+ BNT/potentials/@cpot/display.m,
+ BNT/potentials/@cpot/divide_by_pot.m,
+ BNT/potentials/@cpot/domain_pot.m,
+ BNT/potentials/@cpot/enter_cts_evidence_pot.m,
+ BNT/potentials/@cpot/marginalize_pot.m,
+ BNT/potentials/@cpot/multiply_by_pot.m,
+ BNT/potentials/@cpot/multiply_pots.m,
+ BNT/potentials/@cpot/normalize_pot.m,
+ BNT/potentials/@cpot/pot_to_marginal.m,
+ BNT/potentials/@cpot/rescale_pot.m,
+ BNT/potentials/@cpot/set_domain_pot.m,
+ BNT/potentials/@cpot/Old/cpot_to_mpot.m,
+ BNT/potentials/@cpot/Old/normalize_pot.convert.m,
+ BNT/potentials/@dpot/approxeq_pot.m,
+ BNT/potentials/@dpot/display.m,
+ BNT/potentials/@dpot/domain_pot.m,
+ BNT/potentials/@dpot/dpot_to_table.m,
+ BNT/potentials/@dpot/get_fields.m,
+ BNT/potentials/@dpot/multiply_pots.m,
+ BNT/potentials/@dpot/pot_to_marginal.m,
+ BNT/potentials/@dpot/set_domain_pot.m,
+ BNT/potentials/@mpot/display.m,
+ BNT/potentials/@mpot/marginalize_pot.m,
+ BNT/potentials/@mpot/mpot.m, BNT/potentials/@mpot/mpot_to_cpot.m,
+ BNT/potentials/@mpot/normalize_pot.m,
+ BNT/potentials/@mpot/pot_to_marginal.m,
+ BNT/potentials/@mpot/rescale_pot.m,
+ BNT/potentials/@upot/approxeq_pot.m,
+ BNT/potentials/@upot/display.m,
+ BNT/potentials/@upot/divide_by_pot.m,
+ BNT/potentials/@upot/marginalize_pot.m,
+ BNT/potentials/@upot/multiply_by_pot.m,
+ BNT/potentials/@upot/normalize_pot.m,
+ BNT/potentials/@upot/pot_to_marginal.m,
+ BNT/potentials/@upot/upot.m,
+ BNT/potentials/@upot/upot_to_opt_policy.m,
+ BNT/potentials/Old/comp_eff_node_sizes.m,
+ BNT/potentials/Tables/divide_by_sparse_table.c,
+ BNT/potentials/Tables/divide_by_table.c,
+ BNT/potentials/Tables/marg_sparse_table.c,
+ BNT/potentials/Tables/marg_table.c,
+ BNT/potentials/Tables/mult_by_sparse_table.c,
+ BNT/potentials/Tables/rep_mult.c, HMM/mk_leftright_transmat.m:
+ Initial import of code base from Kevin Murphy.
+
+2002-05-29 08:59 yozhik
+
+ * BNT/@assocarray/assocarray.m,
+ BNT/CPDs/@boolean_CPD/boolean_CPD.m,
+ BNT/CPDs/@discrete_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@discrete_CPD/CPD_to_pi.m,
+ BNT/CPDs/@discrete_CPD/CPD_to_scgpot.m,
+ BNT/CPDs/@discrete_CPD/README,
+ BNT/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m,
+ BNT/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m,
+ BNT/CPDs/@discrete_CPD/convert_to_sparse_table.c,
+ BNT/CPDs/@discrete_CPD/convert_to_table.m,
+ BNT/CPDs/@discrete_CPD/discrete_CPD.m,
+ BNT/CPDs/@discrete_CPD/dom_sizes.m,
+ BNT/CPDs/@discrete_CPD/log_prob_node.m,
+ BNT/CPDs/@discrete_CPD/prob_node.m,
+ BNT/CPDs/@discrete_CPD/sample_node.m,
+ BNT/CPDs/@discrete_CPD/Old/convert_to_pot.m,
+ BNT/CPDs/@discrete_CPD/Old/convert_to_table.m,
+ BNT/CPDs/@discrete_CPD/Old/prob_CPD.m,
+ BNT/CPDs/@discrete_CPD/Old/prob_node.m,
+ BNT/CPDs/@discrete_CPD/private/prod_CPT_and_pi_msgs.m,
+ BNT/CPDs/@gaussian_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@gaussian_CPD/CPD_to_pi.m,
+ BNT/CPDs/@gaussian_CPD/CPD_to_scgpot.m,
+ BNT/CPDs/@gaussian_CPD/adjustable_CPD.m,
+ BNT/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m,
+ BNT/CPDs/@gaussian_CPD/display.m,
+ BNT/CPDs/@gaussian_CPD/get_field.m,
+ BNT/CPDs/@gaussian_CPD/reset_ess.m,
+ BNT/CPDs/@gaussian_CPD/sample_node.m,
+ BNT/CPDs/@gaussian_CPD/set_fields.m,
+ BNT/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m,
+ BNT/CPDs/@gaussian_CPD/Old/gaussian_CPD.m,
+ BNT/CPDs/@gaussian_CPD/Old/log_prob_node.m,
+ BNT/CPDs/@gaussian_CPD/Old/update_ess.m,
+ BNT/CPDs/@gaussian_CPD/Old/update_tied_ess.m,
+ BNT/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m,
+ BNT/CPDs/@generic_CPD/README,
+ BNT/CPDs/@generic_CPD/adjustable_CPD.m,
+ BNT/CPDs/@generic_CPD/display.m,
+ BNT/CPDs/@generic_CPD/generic_CPD.m,
+ BNT/CPDs/@generic_CPD/log_prior.m,
+ BNT/CPDs/@generic_CPD/set_clamped.m,
+ BNT/CPDs/@generic_CPD/Old/BIC_score_CPD.m,
+ BNT/CPDs/@generic_CPD/Old/CPD_to_dpots.m,
+ BNT/CPDs/@gmux_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@gmux_CPD/convert_to_pot.m,
+ BNT/CPDs/@gmux_CPD/CPD_to_pi.m, BNT/CPDs/@gmux_CPD/display.m,
+ BNT/CPDs/@gmux_CPD/gmux_CPD.m, BNT/CPDs/@gmux_CPD/sample_node.m,
+ BNT/CPDs/@gmux_CPD/Old/gmux_CPD.m,
+ BNT/CPDs/@hhmmF_CPD/log_prior.m,
+ BNT/CPDs/@hhmmF_CPD/maximize_params.m,
+ BNT/CPDs/@hhmmF_CPD/reset_ess.m, BNT/CPDs/@hhmmQ_CPD/log_prior.m,
+ BNT/CPDs/@hhmmQ_CPD/reset_ess.m,
+ BNT/CPDs/@mlp_CPD/convert_to_table.m,
+ BNT/CPDs/@mlp_CPD/maximize_params.m, BNT/CPDs/@mlp_CPD/mlp_CPD.m,
+ BNT/CPDs/@mlp_CPD/reset_ess.m, BNT/CPDs/@mlp_CPD/update_ess.m,
+ BNT/CPDs/@noisyor_CPD/CPD_to_lambda_msg.m,
+ BNT/CPDs/@noisyor_CPD/CPD_to_pi.m,
+ BNT/CPDs/@noisyor_CPD/noisyor_CPD.m,
+ BNT/CPDs/@noisyor_CPD/private/sum_prod_CPD_and_pi_msgs.m,
+ BNT/CPDs/@root_CPD/CPD_to_pi.m,
+ BNT/CPDs/@root_CPD/convert_to_pot.m,
+ BNT/CPDs/@root_CPD/log_marg_prob_node.m,
+ BNT/CPDs/@root_CPD/log_prob_node.m,
+ BNT/CPDs/@root_CPD/root_CPD.m, BNT/CPDs/@root_CPD/sample_node.m,
+ BNT/CPDs/@root_CPD/Old/CPD_to_CPT.m,
+ BNT/CPDs/@softmax_CPD/convert_to_pot.m,
+ BNT/CPDs/@softmax_CPD/display.m,
+ BNT/CPDs/@softmax_CPD/get_field.m,
+ BNT/CPDs/@softmax_CPD/maximize_params.m,
+ BNT/CPDs/@softmax_CPD/reset_ess.m,
+ BNT/CPDs/@softmax_CPD/sample_node.m,
+ BNT/CPDs/@softmax_CPD/set_fields.m,
+ BNT/CPDs/@softmax_CPD/update_ess.m,
+ BNT/CPDs/@softmax_CPD/private/extract_params.m,
+ BNT/CPDs/@tabular_CPD/CPD_to_CPT.m,
+ BNT/CPDs/@tabular_CPD/bayes_update_params.m,
+ BNT/CPDs/@tabular_CPD/log_nextcase_prob_node.m,
+ BNT/CPDs/@tabular_CPD/log_prior.m,
+ BNT/CPDs/@tabular_CPD/reset_ess.m,
+ BNT/CPDs/@tabular_CPD/update_ess.m,
+ BNT/CPDs/@tabular_CPD/update_ess_simple.m,
+ BNT/CPDs/@tabular_CPD/Old/BIC_score_CPD.m,
+ BNT/CPDs/@tabular_CPD/Old/bayesian_score_CPD.m,
+ BNT/CPDs/@tabular_CPD/Old/log_marg_prob_node_case.m,
+ BNT/CPDs/@tabular_CPD/Old/mult_CPD_and_pi_msgs.m,
+ BNT/CPDs/@tabular_CPD/Old/prob_CPT.m,
+ BNT/CPDs/@tabular_CPD/Old/prob_node.m,
+ BNT/CPDs/@tabular_CPD/Old/sample_node.m,
+ BNT/CPDs/@tabular_CPD/Old/sample_node_single_case.m,
+ BNT/CPDs/@tabular_CPD/Old/tabular_CPD.m,
+ BNT/CPDs/@tabular_CPD/Old/update_params.m,
+ BNT/CPDs/@tabular_decision_node/CPD_to_CPT.m,
+ BNT/CPDs/@tabular_decision_node/display.m,
+ BNT/CPDs/@tabular_decision_node/get_field.m,
+ BNT/CPDs/@tabular_decision_node/set_fields.m,
+ BNT/CPDs/@tabular_decision_node/tabular_decision_node.m,
+ BNT/CPDs/@tabular_decision_node/Old/tabular_decision_node.m,
+ BNT/CPDs/@tabular_kernel/convert_to_pot.m,
+ BNT/CPDs/@tabular_kernel/convert_to_table.m,
+ BNT/CPDs/@tabular_kernel/get_field.m,
+ BNT/CPDs/@tabular_kernel/set_fields.m,
+ BNT/CPDs/@tabular_kernel/tabular_kernel.m,
+ BNT/CPDs/@tabular_kernel/Old/tabular_kernel.m,
+ BNT/CPDs/@tabular_utility_node/convert_to_pot.m,
+ BNT/CPDs/@tabular_utility_node/display.m,
+ BNT/CPDs/@tabular_utility_node/tabular_utility_node.m,
+ BNT/CPDs/@tree_CPD/display.m,
+ BNT/CPDs/@tree_CPD/evaluate_tree_performance.m,
+ BNT/CPDs/@tree_CPD/get_field.m,
+ BNT/CPDs/@tree_CPD/learn_params.m, BNT/CPDs/@tree_CPD/readme.txt,
+ BNT/CPDs/@tree_CPD/set_fields.m, BNT/CPDs/@tree_CPD/tree_CPD.m,
+ BNT/CPDs/Old/@linear_gaussian_CPD/linear_gaussian_CPD.m,
+ BNT/CPDs/Old/@linear_gaussian_CPD/log_marg_prob_node.m,
+ BNT/CPDs/Old/@linear_gaussian_CPD/update_params_complete.m,
+ BNT/CPDs/Old/@root_gaussian_CPD/log_marg_prob_node.m,
+ BNT/CPDs/Old/@root_gaussian_CPD/root_gaussian_CPD.m,
+ BNT/CPDs/Old/@root_gaussian_CPD/update_params_complete.m,
+ BNT/CPDs/Old/@tabular_chance_node/CPD_to_upot.m,
+ BNT/CPDs/Old/@tabular_chance_node/tabular_chance_node.m,
+ BNT/examples/dynamic/bat1.m, BNT/examples/dynamic/bkff1.m,
+ BNT/examples/dynamic/chmm1.m,
+ BNT/examples/dynamic/cmp_inference_dbn.m,
+ BNT/examples/dynamic/cmp_learning_dbn.m,
+ BNT/examples/dynamic/cmp_online_inference.m,
+ BNT/examples/dynamic/fhmm_infer.m,
+ BNT/examples/dynamic/filter_test1.m,
+ BNT/examples/dynamic/kalman1.m,
+ BNT/examples/dynamic/kjaerulff1.m,
+ BNT/examples/dynamic/loopy_dbn1.m,
+ BNT/examples/dynamic/mk_collage_from_clqs.m,
+ BNT/examples/dynamic/mk_fhmm.m, BNT/examples/dynamic/reveal1.m,
+ BNT/examples/dynamic/scg_dbn.m,
+ BNT/examples/dynamic/skf_data_assoc_gmux.m,
+ BNT/examples/dynamic/HHMM/add_hhmm_end_state.m,
+ BNT/examples/dynamic/HHMM/hhmm_jtree_clqs.m,
+ BNT/examples/dynamic/HHMM/mk_hhmm_topo.m,
+ BNT/examples/dynamic/HHMM/mk_hhmm_topo_F1.m,
+ BNT/examples/dynamic/HHMM/pretty_print_hhmm_parse.m,
+ BNT/examples/dynamic/HHMM/Motif/fixed_args_mk_motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Motif/mk_motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Motif/sample_motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Old/mk_abcd_hhmm.m,
+ BNT/examples/dynamic/HHMM/Old/mk_arrow_alpha_hhmm3.m,
+ BNT/examples/dynamic/HHMM/Old/mk_hhmm2.m,
+ BNT/examples/dynamic/HHMM/Old/mk_hhmm3.m,
+ BNT/examples/dynamic/HHMM/Old/mk_hhmm3_args.m,
+ BNT/examples/dynamic/HHMM/Old/motif_hhmm.m,
+ BNT/examples/dynamic/HHMM/Old/remove_hhmm_end_state.m,
+ BNT/examples/dynamic/HHMM/Square/get_square_data.m,
+ BNT/examples/dynamic/HHMM/Square/hhmm_inference.m,
+ BNT/examples/dynamic/HHMM/Square/is_F2_true_D3.m,
+ BNT/examples/dynamic/HHMM/Square/learn_square_hhmm_discrete.m,
+ BNT/examples/dynamic/HHMM/Square/mk_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/plot_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_cts.m,
+ BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_discrete.m,
+ BNT/examples/dynamic/HHMM/Square/square4.mat,
+ BNT/examples/dynamic/HHMM/Square/square4_cases.mat,
+ BNT/examples/dynamic/HHMM/Square/test_square_fig.m,
+ BNT/examples/dynamic/HHMM/Square/test_square_fig.mat,
+ BNT/examples/dynamic/HHMM/Square/Old/learn_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/Old/mk_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/Old/plot_square_hhmm.m,
+ BNT/examples/dynamic/HHMM/Square/Old/sample_square_hhmm.m,
+ BNT/examples/dynamic/Old/chmm1.m,
+ BNT/examples/dynamic/Old/cmp_inference.m,
+ BNT/examples/dynamic/Old/kalman1.m,
+ BNT/examples/dynamic/Old/old.water1.m,
+ BNT/examples/dynamic/Old/online1.m,
+ BNT/examples/dynamic/Old/online2.m,
+ BNT/examples/dynamic/Old/scg_dbn.m,
+ BNT/examples/dynamic/SLAM/mk_gmux_robot_dbn.m,
+ BNT/examples/dynamic/SLAM/mk_linear_slam.m,
+ BNT/examples/dynamic/SLAM/slam_kf.m,
+ BNT/examples/dynamic/SLAM/slam_offline_loopy.m,
+ BNT/examples/dynamic/SLAM/slam_partial_kf.m,
+ BNT/examples/dynamic/SLAM/slam_stationary_loopy.m,
+ BNT/examples/dynamic/SLAM/Old/offline_loopy_slam.m,
+ BNT/examples/dynamic/SLAM/Old/paskin1.m,
+ BNT/examples/dynamic/SLAM/Old/skf_data_assoc_gmux2.m,
+ BNT/examples/dynamic/SLAM/Old/slam_kf.m,
+ BNT/examples/limids/id1.m, BNT/examples/limids/pigs1.m,
+ BNT/examples/static/cg1.m, BNT/examples/static/cg2.m,
+ BNT/examples/static/discrete2.m, BNT/examples/static/discrete3.m,
+ BNT/examples/static/fa1.m, BNT/examples/static/gaussian1.m,
+ BNT/examples/static/gibbs_test1.m, BNT/examples/static/lw1.m,
+ BNT/examples/static/mfa1.m, BNT/examples/static/mixexp1.m,
+ BNT/examples/static/mixexp2.m, BNT/examples/static/mixexp3.m,
+ BNT/examples/static/mog1.m, BNT/examples/static/qmr1.m,
+ BNT/examples/static/sample1.m, BNT/examples/static/softmax1.m,
+ BNT/examples/static/Belprop/belprop_loop1_discrete.m,
+ BNT/examples/static/Belprop/belprop_loop1_gauss.m,
+ BNT/examples/static/Belprop/belprop_loopy_cg.m,
+ BNT/examples/static/Belprop/belprop_loopy_discrete.m,
+ BNT/examples/static/Belprop/belprop_loopy_gauss.m,
+ BNT/examples/static/Belprop/belprop_polytree_cg.m,
+ BNT/examples/static/Belprop/belprop_polytree_gauss.m,
+ BNT/examples/static/Belprop/bp1.m,
+ BNT/examples/static/Belprop/gmux1.m,
+ BNT/examples/static/Brutti/Belief_IOhmm.m,
+ BNT/examples/static/Brutti/Belief_hmdt.m,
+ BNT/examples/static/Brutti/Belief_hme.m,
+ BNT/examples/static/Brutti/Sigmoid_Belief.m,
+ BNT/examples/static/HME/HMEforMatlab.jpg,
+ BNT/examples/static/HME/README, BNT/examples/static/HME/fhme.m,
+ BNT/examples/static/HME/gen_data.m,
+ BNT/examples/static/HME/hme_class_plot.m,
+ BNT/examples/static/HME/hme_reg_plot.m,
+ BNT/examples/static/HME/hme_topobuilder.m,
+ BNT/examples/static/HME/test_data_class.mat,
+ BNT/examples/static/HME/test_data_class2.mat,
+ BNT/examples/static/HME/test_data_reg.mat,
+ BNT/examples/static/HME/train_data_class.mat,
+ BNT/examples/static/HME/train_data_reg.mat,
+ BNT/examples/static/Misc/mixexp_data.txt,
+ BNT/examples/static/Misc/mixexp_graddesc.m,
+ BNT/examples/static/Misc/mixexp_plot.m,
+ BNT/examples/static/Misc/sprinkler.bif,
+ BNT/examples/static/Models/mk_cancer_bnet.m,
+ BNT/examples/static/Models/mk_car_bnet.m,
+ BNT/examples/static/Models/mk_ideker_bnet.m,
+ BNT/examples/static/Models/mk_incinerator_bnet.m,
+ BNT/examples/static/Models/mk_markov_chain_bnet.m,
+ BNT/examples/static/Models/mk_minimal_qmr_bnet.m,
+ BNT/examples/static/Models/mk_qmr_bnet.m,
+ BNT/examples/static/Models/mk_vstruct_bnet.m,
+ BNT/examples/static/Models/Old/mk_hmm_bnet.m,
+ BNT/examples/static/SCG/scg1.m, BNT/examples/static/SCG/scg2.m,
+ BNT/examples/static/SCG/scg3.m,
+ BNT/examples/static/SCG/scg_3node.m,
+ BNT/examples/static/SCG/scg_unstable.m,
+ BNT/examples/static/StructLearn/bic1.m,
+ BNT/examples/static/StructLearn/cooper_yoo.m,
+ BNT/examples/static/StructLearn/k2demo1.m,
+ BNT/examples/static/StructLearn/mcmc1.m,
+ BNT/examples/static/StructLearn/pc1.m,
+ BNT/examples/static/StructLearn/pc2.m,
+ BNT/examples/static/Zoubin/README,
+ BNT/examples/static/Zoubin/csum.m,
+ BNT/examples/static/Zoubin/ffa.m,
+ BNT/examples/static/Zoubin/mfa.m,
+ BNT/examples/static/Zoubin/mfa_cl.m,
+ BNT/examples/static/Zoubin/mfademo.m,
+ BNT/examples/static/Zoubin/rdiv.m,
+ BNT/examples/static/Zoubin/rprod.m,
+ BNT/examples/static/Zoubin/rsum.m,
+ BNT/examples/static/dtree/test_housing.m,
+ BNT/examples/static/dtree/test_restaurants.m,
+ BNT/examples/static/dtree/test_zoo1.m,
+ BNT/examples/static/dtree/tmp.dot,
+ BNT/examples/static/dtree/transform_data_into_bnt_format.m,
+ BNT/examples/static/fgraph/fg2.m,
+ BNT/examples/static/fgraph/fg3.m,
+ BNT/examples/static/fgraph/fg_mrf1.m,
+ BNT/examples/static/fgraph/fg_mrf2.m,
+ BNT/general/bnet_to_fgraph.m,
+ BNT/general/compute_fwd_interface.m,
+ BNT/general/compute_interface_nodes.m,
+ BNT/general/compute_minimal_interface.m,
+ BNT/general/dbn_to_bnet.m,
+ BNT/general/determine_elim_constraints.m,
+ BNT/general/do_intervention.m, BNT/general/dsep.m,
+ BNT/general/enumerate_scenarios.m, BNT/general/fgraph_to_bnet.m,
+ BNT/general/log_lik_complete.m,
+ BNT/general/log_marg_lik_complete.m, BNT/general/mk_bnet.m,
+ BNT/general/mk_fgraph.m, BNT/general/mk_limid.m,
+ BNT/general/mk_mutilated_samples.m,
+ BNT/general/mk_slice_and_half_dbn.m,
+ BNT/general/partition_dbn_nodes.m,
+ BNT/general/sample_bnet_nocell.m, BNT/general/sample_dbn.m,
+ BNT/general/score_bnet_complete.m,
+ BNT/general/unroll_dbn_topology.m,
+ BNT/general/Old/bnet_to_gdl_graph.m,
+ BNT/general/Old/calc_mpe_bucket.m,
+ BNT/general/Old/calc_mpe_dbn.m,
+ BNT/general/Old/calc_mpe_given_inf_engine.m,
+ BNT/general/Old/calc_mpe_global.m,
+ BNT/general/Old/compute_interface_nodes.m,
+ BNT/general/Old/mk_gdl_graph.m, GraphViz/draw_dbn.m,
+ GraphViz/make_layout.m, BNT/license.gpl.txt,
+ BNT/general/add_evidence_to_gmarginal.m,
+ BNT/inference/@inf_engine/bnet_from_engine.m,
+ BNT/inference/@inf_engine/get_field.m,
+ BNT/inference/@inf_engine/inf_engine.m,
+ BNT/inference/@inf_engine/marginal_family.m,
+ BNT/inference/@inf_engine/set_fields.m,
+ BNT/inference/@inf_engine/update_engine.m,
+ BNT/inference/@inf_engine/Old/marginal_family_pot.m,
+ BNT/inference/@inf_engine/Old/observed_nodes.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/bk_ff_hmm_inf_engine.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_init_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_marginal_from_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_predict_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel1.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/bk_ff_fb.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/combine_marginals_into_joint.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/dbn_to_hmm.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_mat.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec1.m,
+ BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/project_joint_onto_marginals.m,
+ BNT/inference/dynamic/@bk_inf_engine/bk_inf_engine.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_init_bel.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_marginal_from_bel.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel.m,
+ BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel1.m,
+ BNT/inference/dynamic/@bk_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@bk_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@bk_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@bk_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@ff_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/enter_soft_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/ff_inf_engine.m,
+ BNT/inference/dynamic/@ff_inf_engine/filter_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@ff_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@ff_inf_engine/smooth_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence.m,
+ BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence1.m,
+ BNT/inference/dynamic/@ff_inf_engine/Old/marginal_family.m,
+ BNT/inference/dynamic/@frontier_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@frontier_inf_engine/enter_soft_evidence.m,
+ BNT/inference/dynamic/@frontier_inf_engine/frontier_inf_engine.m,
+ BNT/inference/dynamic/@frontier_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@frontier_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@frontier_inf_engine/set_fwdback.m,
+ BNT/inference/dynamic/@hmm_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@hmm_inf_engine/Old/dhmm_inf_engine.m,
+ BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_family.m,
+ BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_nodes.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence1.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence2.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence3.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence4.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/marginal_nodes.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_nonint.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_trans.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine1.m,
+ BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine2.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/jtree_unrolled_dbn_inf_engine.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_family.m,
+ BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_nodes.m,
+ BNT/inference/dynamic/@kalman_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@kalman_inf_engine/kalman_inf_engine.m,
+ BNT/inference/dynamic/@kalman_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@kalman_inf_engine/update_engine.m,
+ BNT/inference/dynamic/@kalman_inf_engine/private/dbn_to_lds.m,
+ BNT/inference/dynamic/@kalman_inf_engine/private/extract_params_from_gbn.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_soft_ev.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/pearl_dbn_inf_engine.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/correct_smooth.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/enter_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence_obj_oriented.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence_fast.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/wrong_smooth.m,
+ BNT/inference/dynamic/@pearl_dbn_inf_engine/private/init_pearl_dbn_ev_msgs.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/enter_evidence.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_family.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_nodes.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/pearl_unrolled_dbn_inf_engine.m,
+ BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/update_engine.m,
+ BNT/inference/online/@filter_engine/bnet_from_engine.m,
+ BNT/inference/online/@filter_engine/enter_evidence.m,
+ BNT/inference/online/@filter_engine/filter_engine.m,
+ BNT/inference/online/@filter_engine/marginal_family.m,
+ BNT/inference/online/@filter_engine/marginal_nodes.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/back.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/backT.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/fwd.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/fwd1.m,
+ BNT/inference/online/@hmm_2TBN_inf_engine/update_engine.m,
+ BNT/inference/online/@jtree_2TBN_inf_engine/marginal_family.m,
+ BNT/inference/online/@jtree_2TBN_inf_engine/marginal_nodes.m,
+ BNT/inference/online/@jtree_2TBN_inf_engine/Old/jtree_2TBN_inf_engine.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back1.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/backT.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/enter_evidence.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd1.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/jtree_sparse_2TBN_inf_engine.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_family.m,
+ BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_nodes.m,
+ BNT/inference/online/@smoother_engine/bnet_from_engine.m,
+ BNT/inference/online/@smoother_engine/marginal_family.m,
+ BNT/inference/online/@smoother_engine/marginal_nodes.m,
+ BNT/inference/online/@smoother_engine/smoother_engine.m,
+ BNT/inference/online/@smoother_engine/update_engine.m,
+ BNT/inference/static/@belprop_fg_inf_engine/belprop_fg_inf_engine.m,
+ BNT/inference/static/@belprop_fg_inf_engine/enter_evidence.m,
+ BNT/inference/static/@belprop_fg_inf_engine/loopy_converged.m,
+ BNT/inference/static/@belprop_fg_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@belprop_fg_inf_engine/set_params.m,
+ BNT/inference/static/@belprop_inf_engine/enter_evidence.m,
+ BNT/inference/static/@belprop_inf_engine/loopy_converged.m,
+ BNT/inference/static/@belprop_inf_engine/marginal_family.m,
+ BNT/inference/static/@belprop_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@belprop_inf_engine/Old/belprop_gdl_inf_engine.m,
+ BNT/inference/static/@belprop_inf_engine/Old/belprop_inf_engine_nostr.m,
+ BNT/inference/static/@belprop_inf_engine/Old/enter_evidence.m,
+ BNT/inference/static/@belprop_inf_engine/Old/enter_evidence1.m,
+ BNT/inference/static/@belprop_inf_engine/Old/marginal_domain.m,
+ BNT/inference/static/@belprop_inf_engine/private/junk,
+ BNT/inference/static/@belprop_inf_engine/private/parallel_protocol.m,
+ BNT/inference/static/@belprop_inf_engine/private/tree_protocol.m,
+ BNT/inference/static/@cond_gauss_inf_engine/cond_gauss_inf_engine.m,
+ BNT/inference/static/@cond_gauss_inf_engine/enter_evidence.m,
+ BNT/inference/static/@cond_gauss_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@enumerative_inf_engine/enter_evidence.m,
+ BNT/inference/static/@enumerative_inf_engine/enumerative_inf_engine.m,
+ BNT/inference/static/@enumerative_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@gaussian_inf_engine/enter_evidence.m,
+ BNT/inference/static/@gaussian_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@gaussian_inf_engine/private/extract_params_from_gbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/enter_evidence.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/gibbs_sampling_inf_engine.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/CPT.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_children.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families_dbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior.c,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior_dbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_strides.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/get_cpts.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.c,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/my_sample_discrete.m,
+ BNT/inference/static/@gibbs_sampling_inf_engine/private/sample_single_discrete.c,
+ BNT/inference/static/@global_joint_inf_engine/global_joint_inf_engine.m,
+ BNT/inference/static/@global_joint_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_inf_engine/cliques_from_engine.m,
+ BNT/inference/static/@jtree_inf_engine/clq_containing_nodes.m,
+ BNT/inference/static/@jtree_inf_engine/collect_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/enter_soft_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@jtree_inf_engine/set_fields.m,
+ BNT/inference/static/@jtree_inf_engine/Old/collect_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/Old/distribute_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/Old/enter_evidence.m,
+ BNT/inference/static/@jtree_inf_engine/Old/enter_soft_evidence.m,
+ BNT/inference/static/@jtree_limid_inf_engine/enter_evidence.m,
+ BNT/inference/static/@jtree_limid_inf_engine/jtree_limid_inf_engine.m,
+ BNT/inference/static/@jtree_limid_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_limid_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_family.m,
+ BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_nodes_SS.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/cliques_from_engine.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/clq_containing_nodes.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/collect_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/distribute_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/enter_evidence.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/enter_soft_evidence.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/init_pot.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/marginal_family.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/set_fields.m,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/collect_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/distribute_evidence.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.c,
+ BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.m,
+ BNT/inference/static/@likelihood_weighting_inf_engine/enter_evidence.m,
+ BNT/inference/static/@likelihood_weighting_inf_engine/likelihood_weighting_inf_engine.m,
+ BNT/inference/static/@likelihood_weighting_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@pearl_inf_engine/enter_evidence.m,
+ BNT/inference/static/@pearl_inf_engine/loopy_converged.m,
+ BNT/inference/static/@pearl_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@pearl_inf_engine/private/compute_bel.m,
+ BNT/inference/static/@pearl_inf_engine/private/prod_lambda_msgs.m,
+ BNT/inference/static/@pearl_inf_engine/private/tree_protocol.m,
+ BNT/inference/static/@quickscore_inf_engine/enter_evidence.m,
+ BNT/inference/static/@quickscore_inf_engine/marginal_nodes.m,
+ BNT/inference/static/@quickscore_inf_engine/quickscore_inf_engine.m,
+ BNT/inference/static/@quickscore_inf_engine/private/C_quickscore.c,
+ BNT/inference/static/@quickscore_inf_engine/private/nr.h,
+ BNT/inference/static/@quickscore_inf_engine/private/nrutil.c,
+ BNT/inference/static/@quickscore_inf_engine/private/nrutil.h,
+ BNT/inference/static/@quickscore_inf_engine/private/quickscore.m,
+ BNT/learning/bayes_update_params.m,
+ BNT/learning/bic_score_family.m,
+ BNT/learning/compute_cooling_schedule.m,
+ BNT/learning/dirichlet_score_family.m,
+ BNT/learning/kpm_learn_struct_mcmc.m,
+ BNT/learning/learn_params_em.m,
+ BNT/learning/learn_struct_dbn_reveal.m,
+ BNT/learning/learn_struct_pdag_ic_star.m,
+ BNT/learning/mcmc_sample_to_hist.m, BNT/learning/mk_schedule.m,
+ BNT/learning/mk_tetrad_data_file.m,
+ BNT/learning/score_dags_old.m, HMM/dhmm_logprob_brute_force.m,
+ HMM/dhmm_logprob_path.m, HMM/mdp_sample.m, Kalman/AR_to_SS.m,
+ Kalman/SS_to_AR.m, Kalman/convert_to_lagged_form.m,
+ Kalman/ensure_AR.m, Kalman/eval_AR_perf.m,
+ Kalman/kalman_filter.m, Kalman/kalman_smoother.m,
+ Kalman/kalman_update.m, Kalman/learn_AR.m,
+ Kalman/learn_AR_diagonal.m, Kalman/learn_kalman.m,
+ Kalman/smooth_update.m,
+ BNT/general/convert_dbn_CPDs_to_tables_slow.m,
+ BNT/general/dispcpt.m, BNT/general/linear_gaussian_to_cpot.m,
+ BNT/general/partition_matrix_vec_3.m,
+ BNT/general/shrink_obs_dims_in_gaussian.m,
+ BNT/general/shrink_obs_dims_in_table.m,
+ BNT/potentials/CPD_to_pot.m, BNT/potentials/README,
+ BNT/potentials/check_for_cd_arcs.m,
+ BNT/potentials/determine_pot_type.m,
+ BNT/potentials/mk_initial_pot.m,
+ BNT/potentials/@cgpot/cg_can_to_mom.m,
+ BNT/potentials/@cgpot/cg_mom_to_can.m,
+ BNT/potentials/@cgpot/cgpot.m, BNT/potentials/@cgpot/display.m,
+ BNT/potentials/@cgpot/divide_by_pot.m,
+ BNT/potentials/@cgpot/domain_pot.m,
+ BNT/potentials/@cgpot/enter_cts_evidence_pot.m,
+ BNT/potentials/@cgpot/enter_discrete_evidence_pot.m,
+ BNT/potentials/@cgpot/marginalize_pot.m,
+ BNT/potentials/@cgpot/multiply_by_pot.m,
+ BNT/potentials/@cgpot/multiply_pots.m,
+ BNT/potentials/@cgpot/normalize_pot.m,
+ BNT/potentials/@cgpot/pot_to_marginal.m,
+ BNT/potentials/@cgpot/Old/normalize_pot.m,
+ BNT/potentials/@cgpot/Old/simple_marginalize_pot.m,
+ BNT/potentials/@cpot/cpot.m, BNT/potentials/@cpot/cpot_to_mpot.m,
+ BNT/potentials/@cpot/display.m,
+ BNT/potentials/@cpot/divide_by_pot.m,
+ BNT/potentials/@cpot/domain_pot.m,
+ BNT/potentials/@cpot/enter_cts_evidence_pot.m,
+ BNT/potentials/@cpot/marginalize_pot.m,
+ BNT/potentials/@cpot/multiply_by_pot.m,
+ BNT/potentials/@cpot/multiply_pots.m,
+ BNT/potentials/@cpot/normalize_pot.m,
+ BNT/potentials/@cpot/pot_to_marginal.m,
+ BNT/potentials/@cpot/rescale_pot.m,
+ BNT/potentials/@cpot/set_domain_pot.m,
+ BNT/potentials/@cpot/Old/cpot_to_mpot.m,
+ BNT/potentials/@cpot/Old/normalize_pot.convert.m,
+ BNT/potentials/@dpot/approxeq_pot.m,
+ BNT/potentials/@dpot/display.m,
+ BNT/potentials/@dpot/domain_pot.m,
+ BNT/potentials/@dpot/dpot_to_table.m,
+ BNT/potentials/@dpot/get_fields.m,
+ BNT/potentials/@dpot/multiply_pots.m,
+ BNT/potentials/@dpot/pot_to_marginal.m,
+ BNT/potentials/@dpot/set_domain_pot.m,
+ BNT/potentials/@mpot/display.m,
+ BNT/potentials/@mpot/marginalize_pot.m,
+ BNT/potentials/@mpot/mpot.m, BNT/potentials/@mpot/mpot_to_cpot.m,
+ BNT/potentials/@mpot/normalize_pot.m,
+ BNT/potentials/@mpot/pot_to_marginal.m,
+ BNT/potentials/@mpot/rescale_pot.m,
+ BNT/potentials/@upot/approxeq_pot.m,
+ BNT/potentials/@upot/display.m,
+ BNT/potentials/@upot/divide_by_pot.m,
+ BNT/potentials/@upot/marginalize_pot.m,
+ BNT/potentials/@upot/multiply_by_pot.m,
+ BNT/potentials/@upot/normalize_pot.m,
+ BNT/potentials/@upot/pot_to_marginal.m,
+ BNT/potentials/@upot/upot.m,
+ BNT/potentials/@upot/upot_to_opt_policy.m,
+ BNT/potentials/Old/comp_eff_node_sizes.m,
+ BNT/potentials/Tables/divide_by_sparse_table.c,
+ BNT/potentials/Tables/divide_by_table.c,
+ BNT/potentials/Tables/marg_sparse_table.c,
+ BNT/potentials/Tables/marg_table.c,
+ BNT/potentials/Tables/mult_by_sparse_table.c,
+ BNT/potentials/Tables/rep_mult.c, HMM/mk_leftright_transmat.m:
+ Initial revision
+
+2002-05-29 04:59 yozhik
+
+ * BNT/inference/static/@stab_cond_gauss_inf_engine/:
+ clq_containing_nodes.m, problems.txt, push_pot_toclique.m,
+ Old/initialize_engine.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-05-29 04:59 yozhik
+
+ * BNT/inference/static/@stab_cond_gauss_inf_engine/:
+ clq_containing_nodes.m, problems.txt, push_pot_toclique.m,
+ Old/initialize_engine.m: Initial revision
+
+2002-05-19 15:11 yozhik
+
+ * BNT/potentials/: @scgcpot/marginalize_pot.m,
+ @scgcpot/normalize_pot.m, @scgcpot/rescale_pot.m,
+ @scgcpot/scgcpot.m, @scgpot/direct_combine_pots.m,
+ @scgpot/pot_to_marginal.m: Initial import of code base from Kevin
+ Murphy.
+
+2002-05-19 15:11 yozhik
+
+ * BNT/potentials/: @scgcpot/marginalize_pot.m,
+ @scgcpot/normalize_pot.m, @scgcpot/rescale_pot.m,
+ @scgcpot/scgcpot.m, @scgpot/direct_combine_pots.m,
+ @scgpot/pot_to_marginal.m: Initial revision
+
+2001-07-28 08:43 yozhik
+
+ * BNT/potentials/genops.c: Initial import of code base from Kevin
+ Murphy.
+
+2001-07-28 08:43 yozhik
+
+ * BNT/potentials/genops.c: Initial revision
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Eqns/lin_reg_eqn.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Eqns/lin_reg_eqn.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/CPTgrass.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/CPTgrass.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/HMEforMatlab.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/HMEforMatlab.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/ar1.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/ar1.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 600 450 300 225 600 450 900 675
+1 1 0 1 -1 0 0 0 -1 0.000 1 0.0000 1725 450 300 225 1725 450 2025 675
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 450 1425 450
+4 0 -1 0 0 0 12 0.0000 4 180 2310 300 1050 Auto Regressive model AR(1)\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 450 525 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1575 525 X2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/ar1.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/ar1.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/batnet.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/batnet.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,318 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+0 32 #dfdfdf
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 5250.000 2175.000 2250 1500 2175 2175 2250 2850
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 6594.530 5137.500 2250 4200 2150 5137 2250 6075
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 3562.500 3450.000 2250 3000 2175 3450 2250 3900
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 2576.560 6412.500 2250 6225 2200 6412 2250 6600
+6 6075 3300 7125 3600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7125 3600 7125 3300 6075 3300 6075 3600 7125 3600
+4 1 -1 0 0 0 12 0.0000 4 135 1020 6600 3525 SensorValid1\001
+-6
+6 6450 3975 7500 4275
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7500 4275 7500 3975 6450 3975 6450 4275 7500 4275
+4 1 -1 0 0 0 12 0.0000 4 135 870 6975 4200 FYdotDiff1\001
+-6
+6 6600 4575 7650 4875
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7650 4875 7650 4575 6600 4575 6600 4875 7650 4875
+4 1 -1 0 0 0 12 0.0000 4 135 975 7125 4800 FcloseSlow1\001
+-6
+6 2400 3075 3000 3375
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3000 3375 3000 3075 2400 3075 2400 3375 3000 3375
+4 1 -1 0 0 0 12 0.0000 4 135 465 2700 3300 Xdot0\001
+-6
+6 5025 3075 5625 3375
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 3375 5625 3075 5025 3075 5025 3375 5625 3375
+4 1 -1 0 0 0 12 0.0000 4 135 465 5325 3300 Xdot1\001
+-6
+6 2400 3600 3150 3900
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3150 3900 3150 3600 2400 3600 2400 3900 3150 3900
+4 1 -1 0 0 0 12 0.0000 4 135 615 2775 3825 InLane0\001
+-6
+6 4875 3600 5625 3900
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 3900 5625 3600 4875 3600 4875 3900 5625 3900
+4 1 -1 0 0 0 12 0.0000 4 135 615 5250 3825 InLane1\001
+-6
+6 2400 1500 3150 1800
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3150 1800 3150 1500 2400 1500 2400 1800 3150 1800
+4 1 -1 0 0 0 12 0.0000 4 135 630 2775 1725 LeftClr0\001
+-6
+6 4875 1500 5625 1800
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 1800 5625 1500 4875 1500 4875 1800 5625 1800
+4 1 -1 0 0 0 12 0.0000 4 135 630 5250 1725 LeftClr1\001
+-6
+6 2400 2025 3300 2325
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3300 2325 3300 2025 2400 2025 2400 2325 3300 2325
+4 1 -1 0 0 0 12 0.0000 4 180 720 2850 2250 RightClr0\001
+-6
+6 4800 2025 5625 2325
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 2325 5625 2025 4800 2025 4800 2325 5625 2325
+4 1 -1 0 0 0 12 0.0000 4 180 720 5250 2250 RightClr1\001
+-6
+6 2400 2550 3300 2850
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3300 2850 3300 2550 2400 2550 2400 2850 3300 2850
+4 1 -1 0 0 0 12 0.0000 4 135 855 2850 2775 LatAction0\001
+-6
+6 4725 2550 5625 2850
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 2850 5625 2550 4725 2550 4725 2850 5625 2850
+4 1 -1 0 0 0 12 0.0000 4 135 855 5175 2775 LatAction1\001
+-6
+6 2400 4200 3450 4500
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3450 4500 3450 4200 2400 4200 2400 4500 3450 4500
+4 1 -1 0 0 0 12 0.0000 4 135 930 2925 4425 FwdAction0\001
+-6
+6 4575 4200 5625 4500
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 4500 5625 4200 4575 4200 4575 4500 5625 4500
+4 1 -1 0 0 0 12 0.0000 4 135 930 5100 4425 FwdAction1\001
+-6
+6 2400 4725 3000 5025
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3000 5025 3000 4725 2400 4725 2400 5025 3000 5025
+4 1 -1 0 0 0 12 0.0000 4 135 465 2700 4950 Ydot0\001
+-6
+6 5025 4725 5625 5025
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 5025 5625 4725 5025 4725 5025 5025 5625 5025
+4 1 -1 0 0 0 12 0.0000 4 135 465 5325 4950 Ydot1\001
+-6
+6 2400 5250 3150 5550
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3150 5550 3150 5250 2400 5250 2400 5550 3150 5550
+4 1 -1 0 0 0 12 0.0000 4 180 705 2775 5475 Stopped0\001
+-6
+6 4500 5250 5250 5550
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5250 5550 5250 5250 4500 5250 4500 5550 5250 5550
+4 1 -1 0 0 0 12 0.0000 4 180 705 4875 5475 Stopped1\001
+-6
+6 6450 5325 7200 5625
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7200 5625 7200 5325 6450 5325 6450 5625 7200 5625
+4 1 -1 0 0 0 12 0.0000 4 135 585 6825 5550 BXdot1\001
+-6
+6 2400 5775 3300 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3300 6075 3300 5775 2400 5775 2400 6075 3300 6075
+4 1 -1 0 0 0 12 0.0000 4 180 885 2850 6000 EngStatus0\001
+-6
+6 4575 5775 5475 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5475 6075 5475 5775 4575 5775 4575 6075 5475 6075
+4 1 -1 0 0 0 12 0.0000 4 180 885 5025 6000 EngStatus1\001
+-6
+6 6150 5775 7200 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7200 6075 7200 5775 6150 5775 6150 6075 7200 6075
+4 1 -1 0 0 0 12 0.0000 4 135 960 6675 6000 BcloseFast1\001
+-6
+6 2400 6300 3750 6600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3750 6600 3750 6300 2400 6300 2400 6600 3750 6600
+4 1 -1 0 0 0 12 0.0000 4 135 1380 3075 6525 FrontBackStatus0\001
+-6
+6 4125 6300 5475 6600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5475 6600 5475 6300 4125 6300 4125 6600 5475 6600
+4 1 -1 0 0 0 12 0.0000 4 135 1380 4800 6525 FrontBackStatus1\001
+-6
+6 6150 6300 7200 6600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7200 6600 7200 6300 6150 6300 6150 6600 7200 6600
+4 1 -1 0 0 0 12 0.0000 4 135 885 6675 6525 BYdotDiff1\001
+-6
+6 7650 5025 8250 5325
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 8250 5325 8250 5025 7650 5025 7650 5325 8250 5325
+4 1 -1 0 0 0 12 0.0000 4 135 390 7950 5250 Fclr1\001
+-6
+6 7650 5775 8250 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 8250 6075 8250 5775 7650 5775 7650 6075 8250 6075
+4 1 -1 0 0 0 12 0.0000 4 135 405 7950 6000 Bclr1\001
+-6
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 3750 4725 2775
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3000 3225 4875 3675
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5175 5025 5025 5250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5325 5775 5475 5025
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5400 5775 6450 4200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5175 2850 5325 3075
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5100 4500 5175 4725
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 1650 4800 2550
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3750 6375 4650 4500
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2700 4650 4200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5475 6000 6450 5475
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7725 5775 7200 5550
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3675 6300 4800 2850
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 6150 6000 5475 6525
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 3
+ 3 1 1.00 60.00 120.00
+ 5625 4800 6375 3750 8850 3750
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 6600 4800 5475 6375
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 6675 6300 6675 6075
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 3
+ 3 1 1.00 60.00 120.00
+ 5475 5850 6150 5175 7650 5175
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3000 3225 5025 3225
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 3225 8850 3225
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7125 3450 8850 3300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 3750 4875 3750
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7125 3450 8850 3675
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 1650 4875 1650
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 1650 8700 1650
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2175 4800 2175
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 2175 8550 2175
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2700 4725 2700
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2100 4725 2625
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 2700 8700 2700
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3450 4350 4575 4350
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7500 4125 8400 4350
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7125 4275 7125 4575
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3000 4875 5025 4875
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7800 5025 7650 4800
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 5400 4500 5400
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7200 5400 8700 5400
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 5925 4575 5925
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7650 5925 7200 5925
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7200 6450 8400 6450
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 8250 5175 8850 4875
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 8250 5925 8850 5925
+2 4 1 1 -1 -1 0 0 -1 4.000 0 0 20 0 0 5
+ 3975 6700 1875 6700 1875 4100 3975 4100 3975 6700
+2 4 1 1 -1 -1 0 0 -1 4.000 0 0 20 0 0 5
+ 3975 4000 1875 4000 1875 1400 3975 1400 3975 4000
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 6600 9750 6300 8400 6300 8400 6600 9750 6600
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 6075 9750 5775 8850 5775 8850 6075 9750 6075
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 5550 9750 5250 8700 5250 8700 5550 9750 5550
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 5025 9750 4725 8850 4725 8850 5025 9750 5025
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 4500 9750 4200 8400 4200 8400 4500 9750 4500
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 3900 9750 3600 8850 3600 8850 3900 9750 3900
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 3375 9750 3075 8850 3075 8850 3375 9750 3375
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 2850 9750 2550 8700 2550 8700 2850 9750 2850
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 2325 9750 2025 8550 2025 8550 2325 9750 2325
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 1800 9750 1500 8700 1500 8700 1800 9750 1800
+4 1 -1 0 0 0 12 0.0000 4 135 840 9300 3300 XdotSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 840 9300 3825 YdotSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 1005 9225 1725 LeftClrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 180 1095 9150 2250 RightClrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 180 900 9225 2775 TurnSignal1\001
+4 1 -1 0 0 0 12 0.0000 4 135 1245 9075 4425 FYdotDiffSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 765 9300 4950 FclrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 960 9225 5475 BXdotSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 780 9300 6000 BclrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 1260 9075 6525 BYdotDiffSens1\001
+4 1 -1 0 0 0 16 0.0000 4 165 585 2925 7050 slice t\001
+4 1 -1 0 0 0 16 0.0000 4 165 840 4800 7050 slice t+1\001
+4 1 -1 0 0 0 16 0.0000 4 165 855 9150 7050 evidence\001
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/batnet.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/batnet.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/batnet_numbered.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/batnet_numbered.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,345 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+0 32 #dfdfdf
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 5250.000 2175.000 2250 1500 2175 2175 2250 2850
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 6594.530 5137.500 2250 4200 2150 5137 2250 6075
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 3562.500 3450.000 2250 3000 2175 3450 2250 3900
+5 1 1 1 -1 -1 0 0 -1 4.000 0 1 0 0 2576.560 6412.500 2250 6225 2200 6412 2250 6600
+6 6075 3300 7125 3600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7125 3600 7125 3300 6075 3300 6075 3600 7125 3600
+4 1 -1 0 0 0 12 0.0000 4 135 1020 6600 3525 SensorValid1\001
+-6
+6 6450 3975 7500 4275
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7500 4275 7500 3975 6450 3975 6450 4275 7500 4275
+4 1 -1 0 0 0 12 0.0000 4 135 870 6975 4200 FYdotDiff1\001
+-6
+6 6600 4575 7650 4875
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7650 4875 7650 4575 6600 4575 6600 4875 7650 4875
+4 1 -1 0 0 0 12 0.0000 4 135 975 7125 4800 FcloseSlow1\001
+-6
+6 2400 3075 3000 3375
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3000 3375 3000 3075 2400 3075 2400 3375 3000 3375
+4 1 -1 0 0 0 12 0.0000 4 135 465 2700 3300 Xdot0\001
+-6
+6 5025 3075 5625 3375
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 3375 5625 3075 5025 3075 5025 3375 5625 3375
+4 1 -1 0 0 0 12 0.0000 4 135 465 5325 3300 Xdot1\001
+-6
+6 2400 3600 3150 3900
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3150 3900 3150 3600 2400 3600 2400 3900 3150 3900
+4 1 -1 0 0 0 12 0.0000 4 135 615 2775 3825 InLane0\001
+-6
+6 4875 3600 5625 3900
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 3900 5625 3600 4875 3600 4875 3900 5625 3900
+4 1 -1 0 0 0 12 0.0000 4 135 615 5250 3825 InLane1\001
+-6
+6 2400 1500 3150 1800
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3150 1800 3150 1500 2400 1500 2400 1800 3150 1800
+4 1 -1 0 0 0 12 0.0000 4 135 630 2775 1725 LeftClr0\001
+-6
+6 4875 1500 5625 1800
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 1800 5625 1500 4875 1500 4875 1800 5625 1800
+4 1 -1 0 0 0 12 0.0000 4 135 630 5250 1725 LeftClr1\001
+-6
+6 2400 2025 3300 2325
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3300 2325 3300 2025 2400 2025 2400 2325 3300 2325
+4 1 -1 0 0 0 12 0.0000 4 180 720 2850 2250 RightClr0\001
+-6
+6 4800 2025 5625 2325
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 2325 5625 2025 4800 2025 4800 2325 5625 2325
+4 1 -1 0 0 0 12 0.0000 4 180 720 5250 2250 RightClr1\001
+-6
+6 2400 2550 3300 2850
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3300 2850 3300 2550 2400 2550 2400 2850 3300 2850
+4 1 -1 0 0 0 12 0.0000 4 135 855 2850 2775 LatAction0\001
+-6
+6 4725 2550 5625 2850
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 2850 5625 2550 4725 2550 4725 2850 5625 2850
+4 1 -1 0 0 0 12 0.0000 4 135 855 5175 2775 LatAction1\001
+-6
+6 2400 4200 3450 4500
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3450 4500 3450 4200 2400 4200 2400 4500 3450 4500
+4 1 -1 0 0 0 12 0.0000 4 135 930 2925 4425 FwdAction0\001
+-6
+6 4575 4200 5625 4500
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 4500 5625 4200 4575 4200 4575 4500 5625 4500
+4 1 -1 0 0 0 12 0.0000 4 135 930 5100 4425 FwdAction1\001
+-6
+6 2400 4725 3000 5025
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3000 5025 3000 4725 2400 4725 2400 5025 3000 5025
+4 1 -1 0 0 0 12 0.0000 4 135 465 2700 4950 Ydot0\001
+-6
+6 5025 4725 5625 5025
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5625 5025 5625 4725 5025 4725 5025 5025 5625 5025
+4 1 -1 0 0 0 12 0.0000 4 135 465 5325 4950 Ydot1\001
+-6
+6 2400 5250 3150 5550
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3150 5550 3150 5250 2400 5250 2400 5550 3150 5550
+4 1 -1 0 0 0 12 0.0000 4 180 705 2775 5475 Stopped0\001
+-6
+6 4500 5250 5250 5550
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5250 5550 5250 5250 4500 5250 4500 5550 5250 5550
+4 1 -1 0 0 0 12 0.0000 4 180 705 4875 5475 Stopped1\001
+-6
+6 6450 5325 7200 5625
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7200 5625 7200 5325 6450 5325 6450 5625 7200 5625
+4 1 -1 0 0 0 12 0.0000 4 135 585 6825 5550 BXdot1\001
+-6
+6 2400 5775 3300 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3300 6075 3300 5775 2400 5775 2400 6075 3300 6075
+4 1 -1 0 0 0 12 0.0000 4 180 885 2850 6000 EngStatus0\001
+-6
+6 4575 5775 5475 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5475 6075 5475 5775 4575 5775 4575 6075 5475 6075
+4 1 -1 0 0 0 12 0.0000 4 180 885 5025 6000 EngStatus1\001
+-6
+6 6150 5775 7200 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7200 6075 7200 5775 6150 5775 6150 6075 7200 6075
+4 1 -1 0 0 0 12 0.0000 4 135 960 6675 6000 BcloseFast1\001
+-6
+6 2325 6300 3825 6600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 3750 6600 3750 6300 2400 6300 2400 6600 3750 6600
+4 1 -1 0 0 0 12 0.0000 4 135 1380 3075 6525 FrontBackStatus0\001
+-6
+6 4050 6300 5550 6600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 5475 6600 5475 6300 4125 6300 4125 6600 5475 6600
+4 1 -1 0 0 0 12 0.0000 4 135 1380 4800 6525 FrontBackStatus1\001
+-6
+6 6150 6300 7200 6600
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 7200 6600 7200 6300 6150 6300 6150 6600 7200 6600
+4 1 -1 0 0 0 12 0.0000 4 135 885 6675 6525 BYdotDiff1\001
+-6
+6 7650 5025 8250 5325
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 8250 5325 8250 5025 7650 5025 7650 5325 8250 5325
+4 1 -1 0 0 0 12 0.0000 4 135 390 7950 5250 Fclr1\001
+-6
+6 7650 5775 8250 6075
+2 4 0 1 -1 7 0 0 -1 0.000 0 0 7 0 0 5
+ 8250 6075 8250 5775 7650 5775 7650 6075 8250 6075
+4 1 -1 0 0 0 12 0.0000 4 135 405 7950 6000 Bclr1\001
+-6
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 3750 4725 2775
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3000 3225 4875 3675
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5175 5025 5025 5250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5325 5775 5475 5025
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5400 5775 6450 4200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5175 2850 5325 3075
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5100 4500 5175 4725
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 1650 4800 2550
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3750 6375 4650 4500
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2700 4650 4200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5475 6000 6450 5475
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7725 5775 7200 5550
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3675 6300 4800 2850
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 6150 6000 5475 6525
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 3
+ 3 1 1.00 60.00 120.00
+ 5625 4800 6375 3750 8850 3750
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 6600 4800 5475 6375
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 6675 6300 6675 6075
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 3
+ 3 1 1.00 60.00 120.00
+ 5475 5850 6150 5175 7650 5175
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3000 3225 5025 3225
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 3225 8850 3225
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7125 3450 8850 3300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 3750 4875 3750
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7125 3450 8850 3675
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 1650 4875 1650
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 1650 8700 1650
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2175 4800 2175
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 2175 8550 2175
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2700 4725 2700
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 2100 4725 2625
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 5625 2700 8700 2700
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3450 4350 4575 4350
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7500 4125 8400 4350
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7125 4275 7125 4575
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3000 4875 5025 4875
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7800 5025 7650 4800
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3150 5400 4500 5400
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7200 5400 8700 5400
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 3300 5925 4575 5925
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7650 5925 7200 5925
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 7200 6450 8400 6450
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 8250 5175 8850 4875
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 8250 5925 8850 5925
+2 4 1 1 -1 -1 0 0 -1 4.000 0 0 20 0 0 5
+ 3975 6700 1875 6700 1875 4100 3975 4100 3975 6700
+2 4 1 1 -1 -1 0 0 -1 4.000 0 0 20 0 0 5
+ 3975 4000 1875 4000 1875 1400 3975 1400 3975 4000
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 6600 9750 6300 8400 6300 8400 6600 9750 6600
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 6075 9750 5775 8850 5775 8850 6075 9750 6075
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 5550 9750 5250 8700 5250 8700 5550 9750 5550
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 5025 9750 4725 8850 4725 8850 5025 9750 5025
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 4500 9750 4200 8400 4200 8400 4500 9750 4500
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 3900 9750 3600 8850 3600 8850 3900 9750 3900
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 3375 9750 3075 8850 3075 8850 3375 9750 3375
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 2850 9750 2550 8700 2550 8700 2850 9750 2850
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 2325 9750 2025 8550 2025 8550 2325 9750 2325
+2 4 0 1 -1 32 0 0 20 0.000 0 0 7 0 0 5
+ 9750 1800 9750 1500 8700 1500 8700 1800 9750 1800
+4 1 -1 0 0 0 12 0.0000 4 135 840 9300 3300 XdotSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 840 9300 3825 YdotSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 1005 9225 1725 LeftClrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 180 1095 9150 2250 RightClrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 180 900 9225 2775 TurnSignal1\001
+4 1 -1 0 0 0 12 0.0000 4 135 1245 9075 4425 FYdotDiffSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 765 9300 4950 FclrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 960 9225 5475 BXdotSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 780 9300 6000 BclrSens1\001
+4 1 -1 0 0 0 12 0.0000 4 135 1260 9075 6525 BYdotDiffSens1\001
+4 1 -1 0 0 0 16 0.0000 4 165 585 2925 7050 slice t\001
+4 1 -1 0 0 0 16 0.0000 4 165 840 4800 7050 slice t+1\001
+4 1 -1 0 0 0 16 0.0000 4 165 855 9150 7050 evidence\001
+4 0 0 50 0 0 12 0.0000 4 135 180 2025 6525 14\001
+4 0 0 50 0 0 12 0.0000 4 135 90 1950 6075 7\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 5475 19\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 4950 17\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 4425 16\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 3825 20\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 3375 23\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 2775 21\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 2250 25\001
+4 0 0 50 0 0 12 0.0000 4 135 180 1950 1800 27\001
+4 0 0 50 0 0 12 0.0000 4 135 90 5925 3600 6\001
+4 0 0 50 0 0 12 0.0000 4 135 180 6225 4200 12\001
+4 0 0 50 0 0 12 0.0000 4 135 180 6375 4800 13\001
+4 0 0 50 0 0 12 0.0000 4 135 180 7875 5025 10\001
+4 0 0 50 0 0 12 0.0000 4 135 90 6300 5475 8\001
+4 0 0 50 0 0 12 0.0000 4 135 90 6000 6075 4\001
+4 0 0 50 0 0 12 0.0000 4 135 90 6000 6600 1\001
+4 0 0 50 0 0 12 0.0000 4 135 90 7575 6150 3\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 1725 28\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 2250 26\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 2775 22\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 3300 24\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 3825 18\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 4425 15\001
+4 0 0 50 0 0 12 0.0000 4 135 180 9825 4950 11\001
+4 0 0 50 0 0 12 0.0000 4 135 90 9825 5475 9\001
+4 0 0 50 0 0 12 0.0000 4 135 90 9825 6075 5\001
+4 0 0 50 0 0 12 0.0000 4 135 90 9825 6525 2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/bic.png
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/bic.png has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/cg1.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/cg1.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 1200 3000 300 300 1200 3000 1200 3300
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3900 3000 300 300 3900 3000 3900 3300
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3900 4500 300 300 3900 4500 3900 4800
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 2100 5700 300 300 2100 5700 2100 6000
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 5700 5700 300 300 5700 5700 5700 6000
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3600 1200 4200 1200 4200 1800 3600 1800 3600 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 6600 1200 7200 1200 7200 1800 6600 1800 6600 1200
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1200 1800 1200 2700
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1200 1800 3600 2850
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1200 1800 3600 4350
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1200 3300 2100 5400
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3750 4800 2100 5400
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4050 4800 5400 5550
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3900 1800 3900 2700
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3900 3300 3900 4200
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6900 1800 4200 4350
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6900 1800 6900 2700
+2 2 0 1 0 0 100 0 2 0.000 0 0 7 0 0 5
+ 900 1200 1500 1200 1500 1800 900 1800 900 1200
+4 0 0 100 0 0 12 0.0000 4 135 180 1050 1650 W\001
+4 0 0 100 0 0 12 0.0000 4 135 300 1050 3150 Min\001
+4 0 0 100 0 0 12 0.0000 4 135 105 3750 1650 F\001
+4 0 0 100 0 0 12 0.0000 4 135 120 3750 3150 E\001
+4 0 0 100 0 0 12 0.0000 4 135 135 3750 4650 D\001
+4 0 0 100 0 0 12 0.0000 4 135 405 1950 5850 Mout\001
+4 0 0 100 0 0 12 0.0000 4 135 120 6750 1650 B\001
+4 0 0 100 0 0 12 0.0000 4 135 120 6750 3150 C\001
+4 0 0 100 0 0 12 0.0000 4 135 105 5550 5850 L\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/cg1.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/cg1.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.T5.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.T5.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,323 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 300 600 4500 9900
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 1200 900 1200 900 1800 300 1800 300 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 3300 900 3300 900 3900 300 3900 300 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 5400 900 5400 900 6000 300 6000 300 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 7500 900 7500 900 8100 300 8100 300 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 9300 900 9300 900 9900 300 9900 300 9300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 1200 3600 1200 3600 1800 3000 1800 3000 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 3300 3600 3300 3600 3900 3000 3900 3000 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 5400 3600 5400 3600 6000 3000 6000 3000 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 7500 3600 7500 3600 8100 3000 8100 3000 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 9300 3600 9300 3600 9900 3000 9900 3000 9300
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1200 3900 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3300 3900 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5400 3900 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7500 3900 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9300 3900 9000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1200 1200 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3300 1200 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5400 1200 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7500 1200 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9300 1200 9000
+-6
+6 3600 600 7200 9900
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 1200 6300 1200 6300 1800 5700 1800 5700 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 3300 6300 3300 6300 3900 5700 3900 5700 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 5400 6300 5400 6300 6000 5700 6000 5700 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 7500 6300 7500 6300 8100 5700 8100 5700 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 9300 6300 9300 6300 9900 5700 9900 5700 9300
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1200 6600 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3300 6600 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5400 6600 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7500 6600 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9300 6600 9000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 7800
+-6
+6 6300 600 9900 9900
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 9600 5100 300 300 9600 5100 9600 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 9600 7200 300 300 9600 7200 9600 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 9600 9000 300 300 9600 9000 9600 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 9600 900 300 300 9600 900 9600 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 9600 3000 300 300 9600 3000 9600 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 8400 1200 9000 1200 9000 1800 8400 1800 8400 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 8400 3300 9000 3300 9000 3900 8400 3900 8400 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 8400 5400 9000 5400 9000 6000 8400 6000 8400 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 8400 7500 9000 7500 9000 8100 8400 8100 8400 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 8400 9300 9000 9300 9000 9900 8400 9900 8400 9300
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 1200 9300 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 3300 9300 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 5400 9300 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 7500 9300 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 9300 9300 9000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1500 8400 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3600 8400 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1500 8400 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3600 8400 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5700 8400 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3600 8400 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5700 8400 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5700 8400 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7800 8400 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7800 8400 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7800 8400 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9600 8400 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9600 8400 7800
+-6
+6 9000 600 12600 9900
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 12300 5100 300 300 12300 5100 12300 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 12300 7200 300 300 12300 7200 12300 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 12300 9000 300 300 12300 9000 12300 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 12300 900 300 300 12300 900 12300 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 12300 3000 300 300 12300 3000 12300 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 11100 1200 11700 1200 11700 1800 11100 1800 11100 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 11100 3300 11700 3300 11700 3900 11100 3900 11100 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 11100 5400 11700 5400 11700 6000 11100 6000 11100 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 11100 7500 11700 7500 11700 8100 11100 8100 11100 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 11100 9300 11700 9300 11700 9900 11100 9900 11100 9300
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 11700 1200 12000 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 11700 3300 12000 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 11700 5400 12000 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 11700 7500 12000 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 11700 9300 12000 9000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 1500 11100 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 3600 11100 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 1500 11100 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 3600 11100 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 5700 11100 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 3600 11100 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 5700 11100 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 5700 11100 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 7800 11100 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 7800 11100 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 7800 11100 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 9600 11100 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 9000 9600 11100 7800
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,187 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 225 1125 975 9975
+6 225 1125 975 9975
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 1200 900 1200 900 1800 300 1800 300 1200
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 3300 900 3300 900 3900 300 3900 300 3300
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 5400 900 5400 900 6000 300 6000 300 5400
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 7500 900 7500 900 8100 300 8100 300 7500
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 9300 900 9300 900 9900 300 9900 300 9300
+-6
+-6
+6 2925 1125 3675 9975
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 1200 3600 1200 3600 1800 3000 1800 3000 1200
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 3300 3600 3300 3600 3900 3000 3900 3000 3300
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 5400 3600 5400 3600 6000 3000 6000 3000 5400
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 7500 3600 7500 3600 8100 3000 8100 3000 7500
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 9300 3600 9300 3600 9900 3000 9900 3000 9300
+-6
+6 5625 1125 6375 9975
+6 5625 1125 6375 9975
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 1200 6300 1200 6300 1800 5700 1800 5700 1200
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 3300 6300 3300 6300 3900 5700 3900 5700 3300
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 5400 6300 5400 6300 6000 5700 6000 5700 5400
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 7500 6300 7500 6300 8100 5700 8100 5700 7500
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 9300 6300 9300 6300 9900 5700 9900 5700 9300
+-6
+-6
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+1 3 0 2 7 0 50 0 20 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7500 1200 7200
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5400 1200 5100
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3300 1200 3000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 1200 1200 1050
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 1200 3900 1050
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 1500 3000 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3600 3000 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 1500 3000 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3600 3000 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3600 3000 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5700 3000 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3300 3900 3000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3600 5700 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3600 5700 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5700 3000 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7800 3000 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5700 3000 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7800 3000 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 9600 3000 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 9300 1200 9000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7800 3000 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 9600 3000 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 9600 5700 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 9300 3900 9000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 9600 5700 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7800 5700 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7800 5700 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7800 5700 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7500 3900 7200
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5700 5700 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5700 5700 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5700 5700 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3600 5700 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 1500 5700 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 1500 5700 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 1200 6600 1050
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 3300 6600 3000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 5400 6600 5100
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 9300 6600 9000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 7500 6600 7200
+2 1 0 3 0 0 100 0 5 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5400 3900 5100
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.influence.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.influence.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,200 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 1200 900 1200 900 1800 300 1800 300 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 3300 900 3300 900 3900 300 3900 300 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 5400 900 5400 900 6000 300 6000 300 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 7500 900 7500 900 8100 300 8100 300 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 9300 900 9300 900 9900 300 9900 300 9300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 1200 3600 1200 3600 1800 3000 1800 3000 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 3300 3600 3300 3600 3900 3000 3900 3000 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 5400 3600 5400 3600 6000 3000 6000 3000 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 7500 3600 7500 3600 8100 3000 8100 3000 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 9300 3600 9300 3600 9900 3000 9900 3000 9300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 1200 6300 1200 6300 1800 5700 1800 5700 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 3300 6300 3300 6300 3900 5700 3900 5700 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 5400 6300 5400 6300 6000 5700 6000 5700 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 7500 6300 7500 6300 8100 5700 8100 5700 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 9300 6300 9300 6300 9900 5700 9900 5700 9300
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7500 1200 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9300 1200 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9300 3900 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7500 3900 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7500 6600 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9300 6600 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5400 3900 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5400 6600 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5400 1200 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3300 1200 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1200 1200 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1200 3900 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3300 3900 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3300 6600 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1200 6600 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 900 9600 3000 7800
+3 2 1 3 0 7 100 0 -1 8.000 0 1 0 4
+ 2 0 6.00 180.00 360.00
+ 750 1950 2775 3750 975 5475 2925 5550
+ 0.000 -1.000 -1.000 0.000
+3 0 1 3 0 7 100 0 -1 8.000 0 1 0 3
+ 2 0 6.00 180.00 360.00
+ 900 1875 3900 3450 5700 5550
+ 0.000 1.000 0.000
+4 0 0 100 0 0 25 0.0000 4 255 435 375 1575 A1\001
+4 0 0 100 0 0 25 0.0000 4 255 420 375 3750 B1\001
+4 0 0 100 0 0 25 0.0000 4 255 420 375 5775 C1\001
+4 0 0 100 0 0 25 0.0000 4 255 435 375 7950 D1\001
+4 0 0 100 0 0 25 0.0000 4 255 405 375 9750 E1\001
+4 0 0 100 0 0 25 0.0000 4 255 435 3075 1575 A2\001
+4 0 0 100 0 0 25 0.0000 4 255 420 3075 3750 B2\001
+4 0 0 100 0 0 25 0.0000 4 255 420 3075 5775 C2\001
+4 0 0 100 0 0 25 0.0000 4 255 435 3075 7875 D2\001
+4 0 0 100 0 0 25 0.0000 4 255 405 3075 9675 E2\001
+4 0 0 100 0 0 25 0.0000 4 255 435 5775 1575 A3\001
+4 0 0 100 0 0 25 0.0000 4 255 420 5700 3675 B3\001
+4 0 0 100 0 0 25 0.0000 4 255 420 5850 5775 C3\001
+4 0 0 100 0 0 25 0.0000 4 255 435 5775 7875 D3\001
+4 0 0 100 0 0 25 0.0000 4 255 405 5850 9675 E3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.named.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.named.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,192 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 1200 900 1200 900 1800 300 1800 300 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 3300 900 3300 900 3900 300 3900 300 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 5400 900 5400 900 6000 300 6000 300 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 7500 900 7500 900 8100 300 8100 300 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 9300 900 9300 900 9900 300 9900 300 9300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 1200 3600 1200 3600 1800 3000 1800 3000 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 3300 3600 3300 3600 3900 3000 3900 3000 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 5400 3600 5400 3600 6000 3000 6000 3000 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 7500 3600 7500 3600 8100 3000 8100 3000 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 9300 3600 9300 3600 9900 3000 9900 3000 9300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 1200 6300 1200 6300 1800 5700 1800 5700 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 3300 6300 3300 6300 3900 5700 3900 5700 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 5400 6300 5400 6300 6000 5700 6000 5700 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 7500 6300 7500 6300 8100 5700 8100 5700 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 9300 6300 9300 6300 9900 5700 9900 5700 9300
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7500 1200 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9300 1200 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9300 3900 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7500 3900 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7500 6600 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9300 6600 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5400 3900 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5400 6600 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5400 1200 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3300 1200 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1200 1200 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1200 3900 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3300 3900 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3300 6600 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1200 6600 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 900 9600 3000 7800
+4 0 0 100 0 0 25 0.0000 4 255 435 375 1575 A1\001
+4 0 0 100 0 0 25 0.0000 4 255 420 375 3750 B1\001
+4 0 0 100 0 0 25 0.0000 4 255 420 375 5775 C1\001
+4 0 0 100 0 0 25 0.0000 4 255 435 375 7950 D1\001
+4 0 0 100 0 0 25 0.0000 4 255 405 375 9750 E1\001
+4 0 0 100 0 0 25 0.0000 4 255 435 3075 1575 A2\001
+4 0 0 100 0 0 25 0.0000 4 255 420 3075 3750 B2\001
+4 0 0 100 0 0 25 0.0000 4 255 420 3075 5775 C2\001
+4 0 0 100 0 0 25 0.0000 4 255 435 3075 7875 D2\001
+4 0 0 100 0 0 25 0.0000 4 255 405 3075 9675 E2\001
+4 0 0 100 0 0 25 0.0000 4 255 435 5775 1575 A3\001
+4 0 0 100 0 0 25 0.0000 4 255 420 5700 3675 B3\001
+4 0 0 100 0 0 25 0.0000 4 255 420 5850 5775 C3\001
+4 0 0 100 0 0 25 0.0000 4 255 435 5775 7875 D3\001
+4 0 0 100 0 0 25 0.0000 4 255 405 5850 9675 E3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.small.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5.small.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 300 600 4500 9900
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 1200 900 1200 900 1800 300 1800 300 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 3300 900 3300 900 3900 300 3900 300 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 5400 900 5400 900 6000 300 6000 300 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 7500 900 7500 900 8100 300 8100 300 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 300 9300 900 9300 900 9900 300 9900 300 9300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 1200 3600 1200 3600 1800 3000 1800 3000 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 3300 3600 3300 3600 3900 3000 3900 3000 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 5400 3600 5400 3600 6000 3000 6000 3000 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 7500 3600 7500 3600 8100 3000 8100 3000 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 3000 9300 3600 9300 3600 9900 3000 9900 3000 9300
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1200 3900 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3300 3900 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5400 3900 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7500 3900 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9300 3900 9000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1200 1200 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3300 1200 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5400 1200 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7500 1200 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9300 1200 9000
+-6
+6 3600 600 7200 9900
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 1200 6300 1200 6300 1800 5700 1800 5700 1200
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 3300 6300 3300 6300 3900 5700 3900 5700 3300
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 5400 6300 5400 6300 6000 5700 6000 5700 5400
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 7500 6300 7500 6300 8100 5700 8100 5700 7500
+2 2 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 5
+ 5700 9300 6300 9300 6300 9900 5700 9900 5700 9300
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1200 6600 1050
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3300 6600 3000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5400 6600 5100
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7500 6600 7200
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9300 6600 9000
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 1500
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 3600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 5700
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 7800
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 9600
+2 1 0 1 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 7800
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5_circle.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5_circle.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,162 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 602 3534 300 300 602 3534 602 3834
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 602 5634 300 300 602 5634 602 5934
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 602 7734 300 300 602 7734 602 8034
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 602 9459 300 300 602 9459 602 9759
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 600 1425 300 300 600 1425 600 1725
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3300 1500 300 300 3300 1500 3300 1800
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3300 3600 300 300 3300 3600 3300 3900
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3300 5700 300 300 3300 5700 3300 6000
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3300 7800 300 300 3300 7800 3300 8100
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 3300 9600 300 300 3300 9600 3300 9900
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 6077 1509 300 300 6077 1509 6077 1809
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 6077 3534 300 300 6077 3534 6077 3834
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 6075 5700 300 300 6075 5700 6075 6000
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 6077 7809 300 300 6077 7809 6077 8109
+1 3 0 1 0 7 100 0 -1 0.000 1 0.0000 6002 9534 300 300 6002 9534 6002 9834
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7500 1200 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9300 1200 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 9600 3000 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 7800 3000 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9300 3900 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 9600 5700 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 9600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7800 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 7500 3900 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 7800
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 7500 6600 7200
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 9300 6600 9000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5700 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 5400 3900 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 5400 6600 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5700 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 5400 1200 5100
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 5700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3600 3000 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 3300 1200 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1200 1200 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1500 3000 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1200 3900 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 1500 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 1500
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3300 3900 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 3300 6600 3000
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3600 3600 5700 3600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6300 1200 6600 1050
+2 1 0 2 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 900 9600 3000 7800
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/chmm5_nobold.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/chmm5_nobold.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,187 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 225 1125 975 9975
+6 225 1125 975 9975
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 1200 900 1200 900 1800 300 1800 300 1200
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 3300 900 3300 900 3900 300 3900 300 3300
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 5400 900 5400 900 6000 300 6000 300 5400
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 7500 900 7500 900 8100 300 8100 300 7500
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 300 9300 900 9300 900 9900 300 9900 300 9300
+-6
+-6
+6 2925 1125 3675 9975
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 1200 3600 1200 3600 1800 3000 1800 3000 1200
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 3300 3600 3300 3600 3900 3000 3900 3000 3300
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 5400 3600 5400 3600 6000 3000 6000 3000 5400
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 7500 3600 7500 3600 8100 3000 8100 3000 7500
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 3000 9300 3600 9300 3600 9900 3000 9900 3000 9300
+-6
+6 5625 1125 6375 9975
+6 5625 1125 6375 9975
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 1200 6300 1200 6300 1800 5700 1800 5700 1200
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 3300 6300 3300 6300 3900 5700 3900 5700 3300
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 5400 6300 5400 6300 6000 5700 6000 5700 5400
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 7500 6300 7500 6300 8100 5700 8100 5700 7500
+2 2 0 4 0 0 100 0 -1 0.000 0 0 7 0 0 5
+ 5700 9300 6300 9300 6300 9900 5700 9900 5700 9300
+-6
+-6
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 6900 7200 300 300 6900 7200 6900 7500
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 6900 9000 300 300 6900 9000 6900 9300
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 4200 9000 300 300 4200 9000 4200 9300
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 4200 7200 300 300 4200 7200 4200 7500
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 4200 5100 300 300 4200 5100 4200 5400
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 4200 3000 300 300 4200 3000 4200 3300
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 4200 900 300 300 4200 900 4200 1200
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 1500 900 300 300 1500 900 1500 1200
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 1500 3000 300 300 1500 3000 1500 3300
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 1500 5100 300 300 1500 5100 1500 5400
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 1500 7200 300 300 1500 7200 1500 7500
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 1500 9000 300 300 1500 9000 1500 9300
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 6900 5100 300 300 6900 5100 6900 5400
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 6900 3000 300 300 6900 3000 6900 3300
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 6900 900 300 300 6900 900 6900 1200
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7500 1200 7200
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5400 1200 5100
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3300 1200 3000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 1200 1200 1050
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 1200 3900 1050
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 1500 3000 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3600 3000 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 1500 3000 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3600 3000 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 3600 3000 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5700 3000 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3300 3900 3000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3600 5700 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3600 5700 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5700 3000 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7800 3000 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 5700 3000 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7800 3000 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 9600 3000 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 9300 1200 9000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 7800 3000 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 900 9600 3000 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 9600 5700 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 9300 3900 9000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 9600 5700 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7800 5700 9600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7800 5700 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7800 5700 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 7500 3900 7200
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5700 5700 7800
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5700 5700 5700
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5700 5700 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 3600 5700 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 1500 5700 3600
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 1500 5700 1500
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 1200 6600 1050
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 3300 6600 3000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 5400 6600 5100
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 9300 6600 9000
+2 1 0 3 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 6300 7500 6600 7200
+2 1 0 3 0 0 100 0 5 0.000 0 0 7 1 0 2
+ 0 0 1.50 90.00 180.00
+ 3600 5400 3900 5100
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa.eps
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa.eps Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,356 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: (ImageMagick)
+%%Title: (fa.eps)
+%%CreationDate: (Tue Nov 16 19:51:50 2004)
+%%BoundingBox: 0 0 131 161
+%%DocumentData: Clean7Bit
+%%LanguageLevel: 1
+%%Pages: 1
+%%EndComments
+
+%%BeginDefaults
+%%EndDefaults
+
+%%BeginProlog
+%
+% Display a color image. The image is displayed in color on
+% Postscript viewers or printers that support color, otherwise
+% it is displayed as grayscale.
+%
+/DirectClassPacket
+{
+ %
+ % Get a DirectClass packet.
+ %
+ % Parameters:
+ % red.
+ % green.
+ % blue.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/DirectClassImage
+{
+ %
+ % Display a DirectClass image.
+ %
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { DirectClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayDirectClassPacket } image
+ } ifelse
+} bind def
+
+/GrayDirectClassPacket
+{
+ %
+ % Get a DirectClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % red
+ % green
+ % blue
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/GrayPseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassImage
+{
+ %
+ % Display a PseudoClass image.
+ %
+ % Parameters:
+ % class: 0-PseudoClass or 1-Grayscale.
+ %
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ class 0 gt
+ {
+ currentfile buffer readline pop
+ token pop /depth exch def pop
+ /grays columns 8 add depth sub depth mul 8 idiv string def
+ columns rows depth
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { currentfile grays readhexstring pop } image
+ }
+ {
+ %
+ % Parameters:
+ % colors: number of colors in the colormap.
+ % colormap: red, green, blue color packets.
+ %
+ currentfile buffer readline pop
+ token pop /colors exch def pop
+ /colors colors 3 mul def
+ /colormap colors string def
+ currentfile colormap readhexstring pop pop
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { PseudoClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayPseudoClassPacket } image
+ } ifelse
+ } ifelse
+} bind def
+
+/DisplayImage
+{
+ %
+ % Display a DirectClass or PseudoClass image.
+ %
+ % Parameters:
+ % x & y translation.
+ % x & y scale.
+ % label pointsize.
+ % image label.
+ % image columns & rows.
+ % class: 0-DirectClass or 1-PseudoClass.
+ % compression: 0-none or 1-RunlengthEncoded.
+ % hex color packets.
+ %
+ gsave
+ /buffer 512 string def
+ /byte 1 string def
+ /color_packet 3 string def
+ /pixels 768 string def
+
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ x y translate
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ currentfile buffer readline pop
+ token pop /pointsize exch def pop
+ /Times-Roman findfont pointsize scalefont setfont
+ x y scale
+ currentfile buffer readline pop
+ token pop /columns exch def
+ token pop /rows exch def pop
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ currentfile buffer readline pop
+ token pop /compression exch def pop
+ class 0 gt { PseudoClassImage } { DirectClassImage } ifelse
+ grestore
+} bind def
+%%EndProlog
+%%Page: 1 1
+%%PageBoundingBox: 0 0 131 161
+userdict begin
+DisplayImage
+0 0
+131 161
+12.000000
+131 161
+1
+1
+1
+1
+ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffff
+ffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffff
+ffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffff
+ffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffff
+ffffffffffffffffe0fffffffffff001ffffffffffffffffffe0ffffffffff8ffe3fffff
+ffffffffffffe0fffffffffc7fffc7ffffffffffffffffe0fffffffffbfffffbffffffff
+ffffffffe0ffffffffe7fffffcffffffffffffffffe0ffffffffdfffffff7fffffffffff
+ffffe0ffffffffbfffffffbfffffffffffffffe0ffffffff7fffffffdfffffffffffffff
+e0fffffffeffffffffefffffffffffffffe0fffffffeffffffffefffffffffffffffe0ff
+fffffdfffffffff7ffffffffffffffe0fffffffdfe31fffff7ffffffffffffffe0ffffff
+fbff7bfffffbffffffffffffffe0fffffffbffb7fffffbffffffffffffffe0fffffffbff
+8ffffffbffffffffffffffe0fffffffbffcffffffbffffffffffffffe0fffffffbffa7ff
+fffbffffffffffffffe0fffffffbffb7fffffbffffffffffffffe0fffffffbff7bfffffb
+ffffffffffffffe0fffffffdfe31fffff7ffffffffffffffe0fffffffdfffffffff7ffff
+ffffffffffe0fffffffeffffffffefffffffffffffffe0fffffffeffffffffefffffffff
+ffffffe0ffffffff7fffffffdfffffffffffffffe0ffffffffbfffffffbfffffffffffff
+ffe0ffffffffdfffffff7fffffffffffffffe0ffffffffe7fffffcffffffffffffffffe0
+fffffffffbfffffbffffffffffffffffe0fffffffffc7fffc7ffffffffffffffffe0ffff
+ffffff8ffe3fffffffffffffffffe0fffffffffff001ffffffffffffffffffe0ffffffff
+ffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffff
+ffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffbfff
+ffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffff
+ffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffffff
+ffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffffffffff
+ffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffffffffffffff
+e0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ff
+ffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffff
+ffffffbfffffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffff
+ffbfffffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbf
+ffffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffff
+ffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffff
+ffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffffbfffffffffffff
+ffffffe0ffffffffffffbfffffffffffffffffffe0fffffffffffeafffffffffffffffff
+ffe0fffffffffffeafffffffffffffffffffe0fffffffffffe9fffffffffffffffffffe0
+ffffffffffff1fffffffffffffffffffe0ffffffffffff1fffffffffffffffffffe0ffff
+ffffffff1fffffffffffffffffffe0ffffffffffff3fffffffffffffffffffe0ffffffff
+ffffbfffffffffffffffffffe0ffffffffffffbfffffffffffffffffffe0ffffffffffff
+ffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffff
+ffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffff
+ffffffffffffe0ffffffffffffffffffffffffffffffffe0fffffffffff001ffffffffff
+ffffffffe0ffffffffff8ffe3fffffffffffffffffe0fffffffffc6eeec7ffffffffffff
+ffffe0fffffffffbfffffbffffffffffffffffe0ffffffffe3bbfbb8ffffffffffffffff
+e0ffffffffdfffffff7fffffffffffffffe0ffffffffaeeeeeeebfffffffffffffffe0ff
+ffffff7fffffffdfffffffffffffffe0fffffffebfbfbfbfafffffffffffffffe0ffffff
+feffffffffefffffffffffffffe0fffffffceeeeeeeee7ffffffffffffffe0fffffffdfe
+38fffff7ffffffffffffffe0fffffffbfb79fbfbfbffffffffffffffe0fffffffbffbbff
+fffbffffffffffffffe0fffffffaeecaeeeeebffffffffffffffe0fffffffbffc7fffffb
+ffffffffffffffe0fffffffbbfafbfbfbbffffffffffffffe0fffffffbffeffffffbffff
+ffffffffffe0fffffffaeeeeeeeeebffffffffffffffe0fffffffdffc7fffff7ffffffff
+ffffffe0fffffffdfbbbfbbbf7ffffffffffffffe0fffffffeffffffffefffffffffffff
+ffe0fffffffeeeeeeeeeefffffffffffffffe0ffffffff7fffffffdfffffffffffffffe0
+ffffffffbfbfbfbfbfffffffffffffffe0ffffffffdfffffff7fffffffffffffffe0ffff
+ffffe6eeeeecffffffffffffffffe0fffffffffbfffffbffffffffffffffffe0ffffffff
+fc7bfbc7ffffffffffffffffe0ffffffffff8ffe3fffffffffffffffffe0fffffffffff0
+01ffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffff
+ffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffff
+ffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffff
+ffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffff
+ffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffff
+e0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ff
+ffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffff
+ffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffff
+ffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffff
+ffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ff80ffffffffbfffcf
+ffdff01c5f7fffe0ffdeffffffffbfffeffffff5c99f7fffe0ffdffffbffff5fffefffff
+f5ebdebfffe0ffdde790cd3f5d3ce9989c6dc7febfffe0ffc1db6bb67eeedb6db6db6c17
+fddfffe0ffdde37bb6fe0edc6d33d9edf7fc1fffe0ffdfdb7bb6feeedb6d7cde5dfbdddf
+ffe0ffdfdb6bb6fdf6db6e76db5df99befffe0ff87e59ccc78e04c86f188d8fc31c7ffe0
+fffffffffffffffffeffffffffffffffe0fffffffffffffffffdffffffffffffffe0ffff
+fffffffffffff9ffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffff
+ffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffff
+ffffffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffff
+ffffffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffff
+ffffffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffff
+ffffffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffff
+ffffe0ffffffffffffffffffffffffffffffffe0ffffffffffffffffffffffffffffffff
+e0
+end
+%%PageTrailer
+%%Trailer
+%%EOF
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 900 1950 300 225 900 1950 1200 2175
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 900 825 300 225 900 825 1200 1050
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1125 900 1650
+4 0 -1 0 0 0 12 0.0000 4 135 135 825 900 X\001
+4 0 -1 0 0 0 12 0.0000 4 135 135 825 2025 Y\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/fa.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_caption.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_caption.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,13 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 900 1950 300 225 900 1950 1200 2175
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 900 825 300 225 900 825 1200 1050
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1125 900 1650
+4 0 -1 0 0 0 12 0.0000 4 135 135 750 2025 Y\001
+4 0 -1 0 0 0 12 0.0000 4 135 120 750 900 X\001
+4 0 -1 0 0 0 12 0.0000 4 180 1620 300 2625 Factor Analysis/PCA\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_discrete.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_discrete.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 975 600 300 225 975 600 1275 825
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2100 600 300 225 2100 600 2400 825
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 825 525 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 825 1425 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2025 825 600 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2025 825 1425 1425
+2 2 0 1 -1 0 0 0 2 0.000 0 0 7 0 0 5
+ 300 1425 750 1425 750 1875 300 1875 300 1425
+2 2 0 1 -1 0 0 0 2 0.000 0 0 7 0 0 5
+ 1275 1425 1725 1425 1725 1875 1275 1875 1275 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 825 3000 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2025 825 3000 1425
+2 2 0 1 0 0 100 0 2 3.000 0 0 7 0 0 5
+ 2775 1425 3225 1425 3225 1875 2775 1875 2775 1425
+4 0 -1 0 0 0 24 0.0000 4 30 270 1350 750 ...\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1350 1725 R2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 825 675 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1950 675 Xn\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 375 1725 R1\001
+4 0 -1 0 0 0 12 0.0000 4 180 1920 600 2325 Discrete Factor Analysis\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 2100 1650 ...\001
+4 0 0 100 0 -1 12 0.0000 4 135 210 2850 1725 Rn\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_discrete.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/fa_discrete.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_discrete_single.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_discrete_single.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,18 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 1 0 1 0 7 100 0 -1 0.000 1 0.0000 825 375 300 225 825 375 1125 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 600 825 1275
+2 2 0 1 0 0 100 0 2 0.000 0 0 7 0 0 5
+ 600 1275 975 1275 975 1725 600 1725 600 1275
+4 0 -1 0 0 0 12 0.0000 4 135 120 750 1575 R\001
+4 0 -1 0 0 0 12 0.0000 4 135 135 750 450 X\001
+4 0 0 100 0 0 12 0.0000 4 180 1785 300 2025 discrete factor analysis\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_regular.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_regular.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 225 300 3450 3150
+1 3 0 1 -1 7 0 0 -1 0.000 1 0.0000 999 1082 335 335 999 1082 1149 1382
+1 3 0 1 -1 7 0 0 -1 0.000 1 0.0000 2124 1082 335 335 2124 1082 2274 1382
+1 3 0 1 -1 0 0 0 2 0.000 1 0.0000 624 2807 335 335 624 2807 774 3107
+1 3 0 1 -1 0 0 0 2 0.000 1 0.0000 1674 2807 335 335 1674 2807 1824 3107
+1 3 0 1 -1 0 0 0 2 0.000 1 0.0000 2874 2807 335 335 2874 2807 3024 3107
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1425 675 2475
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 975 1425 1575 2400
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 975 1425 2775 2475
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 1425 750 2475
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 1425 1650 2325
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 1425 2775 2400
+4 0 -1 0 0 0 24 0.0000 4 330 3225 225 600 regular factor analysis\001
+-6
+4 0 -1 0 0 0 12 0.0000 4 135 210 825 1200 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1950 1200 X2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 525 2925 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1500 2850 Y2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2700 2850 Y3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_regular.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/fa_regular.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar.eps
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar.eps Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,414 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: (ImageMagick)
+%%Title: (fa_scalar.eps)
+%%CreationDate: (Tue Nov 16 19:52:00 2004)
+%%BoundingBox: 0 0 246 156
+%%DocumentData: Clean7Bit
+%%LanguageLevel: 1
+%%Pages: 1
+%%EndComments
+
+%%BeginDefaults
+%%EndDefaults
+
+%%BeginProlog
+%
+% Display a color image. The image is displayed in color on
+% Postscript viewers or printers that support color, otherwise
+% it is displayed as grayscale.
+%
+/DirectClassPacket
+{
+ %
+ % Get a DirectClass packet.
+ %
+ % Parameters:
+ % red.
+ % green.
+ % blue.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/DirectClassImage
+{
+ %
+ % Display a DirectClass image.
+ %
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { DirectClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayDirectClassPacket } image
+ } ifelse
+} bind def
+
+/GrayDirectClassPacket
+{
+ %
+ % Get a DirectClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % red
+ % green
+ % blue
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/GrayPseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassImage
+{
+ %
+ % Display a PseudoClass image.
+ %
+ % Parameters:
+ % class: 0-PseudoClass or 1-Grayscale.
+ %
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ class 0 gt
+ {
+ currentfile buffer readline pop
+ token pop /depth exch def pop
+ /grays columns 8 add depth sub depth mul 8 idiv string def
+ columns rows depth
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { currentfile grays readhexstring pop } image
+ }
+ {
+ %
+ % Parameters:
+ % colors: number of colors in the colormap.
+ % colormap: red, green, blue color packets.
+ %
+ currentfile buffer readline pop
+ token pop /colors exch def pop
+ /colors colors 3 mul def
+ /colormap colors string def
+ currentfile colormap readhexstring pop pop
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { PseudoClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayPseudoClassPacket } image
+ } ifelse
+ } ifelse
+} bind def
+
+/DisplayImage
+{
+ %
+ % Display a DirectClass or PseudoClass image.
+ %
+ % Parameters:
+ % x & y translation.
+ % x & y scale.
+ % label pointsize.
+ % image label.
+ % image columns & rows.
+ % class: 0-DirectClass or 1-PseudoClass.
+ % compression: 0-none or 1-RunlengthEncoded.
+ % hex color packets.
+ %
+ gsave
+ /buffer 512 string def
+ /byte 1 string def
+ /color_packet 3 string def
+ /pixels 768 string def
+
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ x y translate
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ currentfile buffer readline pop
+ token pop /pointsize exch def pop
+ /Times-Roman findfont pointsize scalefont setfont
+ x y scale
+ currentfile buffer readline pop
+ token pop /columns exch def
+ token pop /rows exch def pop
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ currentfile buffer readline pop
+ token pop /compression exch def pop
+ class 0 gt { PseudoClassImage } { DirectClassImage } ifelse
+ grestore
+} bind def
+%%EndProlog
+%%Page: 1 1
+%%PageBoundingBox: 0 0 246 156
+userdict begin
+DisplayImage
+0 0
+246 156
+12.000000
+246 156
+1
+1
+1
+1
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffff800fff
+fffffffffffffffffff800fffffffffffffffffffffcfffffffffffc7ff1ffffffffffff
+ffffffffc7ff1ffffffffffffffffffffcffffffffffe3fffe3ffffffffffffffffffe3f
+ffe3fffffffffffffffffffcffffffffffdfffffdffffffffffffffffffdfffffdffffff
+fffffffffffffcffffffffff3fffffe7fffffffffffffffff3fffffe7fffffffffffffff
+fffcfffffffffefffffffbffffffffffffffffefffffffbffffffffffffffffffcffffff
+fffdfffffffdffffffffffffffffdfffffffdffffffffffffffffffcfffffffffbffffff
+feffffffffffffffffbfffffffeffffffffffffffffffcfffffffff7ffffffff7fffffff
+ffffffff7ffffffff7fffffffffffffffffcfffffffff7ffffffff7fffffffffffffff7f
+fffffff7fffffffffffffffffcffffffffefffffffffbffffffffffffffefffffffffbff
+fffffffffffffffcffffffffefff8c6fffbffffffffffffffeff18fffffbffffffffffff
+fffffcffffffffdfffdecfffdffffffffffffffdffbdfffffdfffffffffffffffffcffff
+ffffdfffedafffdffffffffffffffdffdbfffffdfffffffffffffffffcffffffffdfffe3
+efffdffffffffffffffdffc74ffffdfffffffffffffffffcffffffffdffff3efffdfffff
+fffffffffdffe7b7fffdfffffffffffffffffcffffffffdfffe9efffdffffffffffffffd
+ffd3b7fffdfffffffffffffffffcffffffffdfffedefffdffffffffffffffdffdbb7fffd
+fffffffffffffffffcffffffffdfffdeefffdffffffffffffffdffbdb7fffdffffffffff
+fffffffcffffffffefff8c47ffbffffffffffffffeff1813fffbfffffffffffffffffcff
+ffffffefffffffffbffffffffffffffefffffffffbfffffffffffffffffcfffffffff7ff
+ffffff7fffffffffffffff7ffffffff7fffffffffffffffffcfffffffff7ffffffff7fff
+ffffffffffff7ffffffff7fffffffffffffffffcfffffffffbfffffffeffffff3cf3ffff
+ffbfffffffeffffffffffffffffffcfffffffffdfffffffdffffff3cf3ffffffdfffffff
+dffffffffffffffffffcfffffffffefffffff9ffffffffffffffffefffffffbfffffffff
+fffffffffcffffffffff3fffffe67ffffffffffffffff3fffffe7ffffffffffffffffffc
+ffffffffffdfffffdf8ffffffffffffffffdfffffdfffffffffffffffffffcffffffffff
+e3fffe3ff3fffffffffffffffe3fffe3fffffffffffffffffffcfffffffffffc7ff1fffc
+7fffffffffffffffc7ff1ffffffffffffffffffffcfffffffffff7800f7fff9fffffffff
+fffffff000f7fffffffffffffffffffcfffffffffff7ffff7fffe3ffffffffffffff87ff
+fbfffffffffffffffffffcffffffffffefffffbffffcfffffffffffffe5ffffdffffffff
+fffffffffffcffffffffffefffffbfffff1ffffffffffff13ffffeffffffffffffffffff
+fcffffffffffdfffffdfffffe7ffffffffff8effffff7ffffffffffffffffffcffffffff
+ffdfffffdffffff8fffffffffc7dffffffbffffffffffffffffffcffffffffffdfffffef
+ffffff3ffffffff3f3ffffffdffffffffffffffffffcffffffffffbfffffefffffffc7ff
+ffff8fefffffffeffffffffffffffffffcffffffffffbffffff7fffffff9fffffc7f9fff
+fffff7fffffffffffffffffcffffffffffbffffff7fffffffe3fffe3ff7ffffffffbffff
+fffffffffffffcffffffffff7ffffffbffffffffcfff9ffcfffffffffdffffffffffffff
+fffcffffffffff7ffffffbfffffffff1fc7ffbfffffffffefffffffffffffffffcffffff
+fffefffffffdfffffffffe63fff7ffffffffff7ffffffffffffffffcfffffffffeffffff
+fdffffffffff0fffcfffffffffffbffffffffffffffffcfffffffffefffffffeffffffff
+fcf3ffbfffffffffffdffffffffffffffffcfffffffffdfffffffeffffffffe3fc7e7fff
+ffffffffeffffffffffffffffcfffffffffdffffffff7fffffff1fff9dfffffffffffff7
+fffffffffffffffcfffffffffdffffffff7ffffff8ffffe3fffffffffffffbffffffffff
+fffffcfffffffffbffffffffbfffffe7ffffecfffffffffffffdfffffffffffffffcffff
+fffffbffffffffbfffff1fffffdf1ffffffffffffefffffffffffffffcfffffffff7ffff
+ffffdffff8ffffff3fe7ffffffffffff7ffffffffffffffcfffffffff7ffffffffdfffc7
+fffffefff8ffffffffffffbffffffffffffffcfffffffff7ffffffffefff3ffffff9ffff
+3fffffffffffdffffffffffffffcffffffffefffffffffeff8fffffff7ffffc7ffffffff
+ffeffffffffffffffcffffffffeffffffffff7c7ffffffcffffff9fffffffffff7ffffff
+fffffffcffffffffeffffffffff63fffffffbffffffe3ffffffffffbfffffffffffffcff
+ffffffdffffffffff9ffffffff7fffffffcffffffffffdfffffffffffffcffffffffdfff
+ffffffc3fffffffcfffffffff1fffffffffefffffffffffffcffffffffbffffffffe3dff
+ffffdbfffffffffe7fffffffff7ffffffffffffcffffffffbffffffff1fdffffffa7ffff
+ffffff8fffffffffbffffffffffffcfffffffebfffffffcffebfffff5ffffffffffff3ff
+ffffffdffffffffffffcfffffffe7ffffffe3ffebffffe27fffffffffffc7fffffffefff
+fffffffffcfffffffe5ffffff1fffd3ffffe9fffffffffffff9ffffffff6fffffffffffc
+fffffffe3fffff8ffffe5ffffc7fffffffffffffe3fffffffb7ffffffffffcfffffffcbf
+fffe7fffff1ffff9fffffffffffffffcfffffffd7ffffffffffcfffffffc7fff71ffffff
+1fffffffffffffffffffff1ffffff6bffffffffffcfffffffcfffc8fffffff9fffffffff
+ffffffffffffe7fffff93ffffffffffcfffffffcfff07fffffffcfffffffffffffffffff
+fff8fffffe1ffffffffffcfffffffdffc3ffffffffefffffffffffffffffffffff3fffff
+9ffffffffffcffffffffff803fffffffffffffffffffffffffffffffc7ffffefffffffff
+fcffffff001fffffffffffff800ffffffffffffffffffff9dffff800fffffffcfffff8ff
+e3fffffffffffc7ff1fffffffffffffffffffe27ffc7ff1ffffffcffffc6eeec7fffffff
+ffe377763fffffffffffffffffffcbfe377763fffffcffffbfffffbfffffffffdfffffdf
+ffffffffffffffffff00fdfffffdfffffcfffe3bbfbb8fffffffff1ddfddc7ffffffffff
+fffffffff871ddfddc7ffffcfffdfffffff7fffffffefffffffbffffffffffffffffffff
+efffffffbffffcfffaeeeeeeebfffffffd77777775ffffffffffffffffffffd77777775f
+fffcfff7fffffffdfffffffbfffffffeffffffffffffffffffffbfffffffeffffcffebfb
+fbfbfafffffff5fdfdfdfd7fffffffffffffffffff5fdfdfdfd7fffcffeffffffffeffff
+fff7ffffffff7fffffffffffffffffff7ffffffff7fffcffceeeeeeeee7fffffe7777777
+773ffffffffffffffffffe7777777773fffcffdfe38dffff7fffffeff1c47fffbfffffff
+fffffffffffefe38fffffbfffcffbfb799bfbfbfffffdfdbcb9fdfdfffffffffffffffff
+fdfd7dfdfdfdfffcffbffbb5ffffbfffffdffddfbfffdfffff3cf3fffffffffdffbbffff
+fdfffcffaeecaceeeebfffffd7765737775fffff3cf3fffffffffd7753417775fffcffbf
+fc7dffffbfffffdffe3f7fffdffffffffffffffffffdffc7b6fffdfffcffbbfaf9fbfbbf
+ffffddfd7cfdfddffffffffffffffffffddfcf96dfddfffcffbffefdffffbfffffdfff7d
+ffffdffffffffffffffffffdffefb6fffdfffcffaeeeeceeeebfffffd7777337775fffff
+fffffffffffffd7767367775fffcffdffc78ffff7fffffeffe383fffbfffffffffffffff
+fffeffc7127ffbfffcffdfbbbfbbbf7fffffefdddfdddfbffffffffffffffffffefdddfd
+ddfbfffcffeffffffffefffffff7ffffffff7fffffffffffffffffff7ffffffff7fffcff
+eeeeeeeeeefffffff7777777777fffffffffffffffffff7777777777fffcfff7fffffffd
+fffffffbfffffffeffffffffffffffffffffbfffffffeffffcfffbfbfbfbfbfffffffdfd
+fdfdfdffffffffffffffffffffdfdfdfdfdffffcfffdfffffff7fffffffefffffffbffff
+ffffffffffffffffefffffffbffffcfffe6eeeeecfffffffff37777767ffffffffffffff
+fffffff37777767ffffcffffbfffffbfffffffffdfffffdffffffffffffffffffffffdff
+fffdfffffcffffc7bfbc7fffffffffe3dfde3ffffffffffffffffffffffe3dfde3fffffc
+fffff8ffe3fffffffffffc7ff1ffffffffffffffffffffffffc7ff1ffffffcffffff001f
+ffffffffffff800ffffffffffffffffffffffffff800fffffffcffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fffcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffff
+fffffffffffff80fffffffffffe7ffeffffffffffffffffffffffffffcffffffffffffff
+fffdeffffffffffff7fffffffffffffffffffffffffffffcfffffffffffffffffdffffbf
+fffffff7fffffffffffffffffffffffffffffcfffffffffffffffffdde790cd3e69e74cc
+4e3ffffffffffffffffffffffffcfffffffffffffffffc1db6bb67db6db6db6dbfffffff
+fffffffffffffffffcfffffffffffffffffdde37bb6fe36e3699ecffffffffffffffffff
+fffffffcfffffffffffffffffdfdb7bb6fdb6db6be6f3ffffffffffffffffffffffffcff
+fffffffffffffffdfdb6bb6fdb6db73b6dbffffffffffffffffffffffffcffffffffffff
+fffff87e59ccc7e4264378c47ffffffffffffffffffffffffcffffffffffffffffffffff
+ffffffffff7ffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffe
+fffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffffff
+fffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffffc
+end
+%%PageTrailer
+%%Trailer
+%%EOF
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 300 375 3675 1950
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 975 600 300 225 975 600 1275 825
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2475 600 300 225 2475 600 2775 825
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 600 1650 300 225 600 1650 900 1875
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3315 1654 300 225 3315 1654 3615 1879
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1575 1650 300 225 1575 1650 1875 1875
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 825 600 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1200 750 3075 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2400 825 750 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2625 825 3225 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 825 1425 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2400 825 1650 1350
+4 0 -1 0 0 0 12 0.0000 4 135 225 900 675 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2325 675 Xn\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 450 1725 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 270 3150 1725 Ym\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 1575 750 ...\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1425 1725 Y2\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 2175 1650 ...\001
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar_caption.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/fa_scalar_caption.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 300 375 3675 1950
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 975 600 300 225 975 600 1275 825
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2475 600 300 225 2475 600 2775 825
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 600 1650 300 225 600 1650 900 1875
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3315 1654 300 225 3315 1654 3615 1879
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1575 1650 300 225 1575 1650 1875 1875
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 825 600 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1200 750 3075 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2400 825 750 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2625 825 3225 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 825 1425 1425
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2400 825 1650 1350
+4 0 -1 0 0 0 12 0.0000 4 135 210 900 675 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 2325 675 Xn\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 450 1725 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 270 3150 1725 Ym\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 1575 750 ...\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1425 1725 Y2\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 2175 1650 ...\001
+-6
+4 0 -1 0 0 0 12 0.0000 4 180 1170 1200 2325 Factor analysis\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/factorial_hmm3.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/factorial_hmm3.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,81 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 1 0 0 100 0 -1 0.000 0 1 1 0 2587.500 3637.500 1500 2775 1200 3675 1500 4500
+ 0 0 1.00 60.00 120.00
+5 1 0 1 0 0 100 0 -1 0.000 0 1 1 0 2772.051 3261.376 1500 1875 900 3450 1425 4575
+ 0 0 1.00 60.00 120.00
+5 1 0 1 0 0 100 0 -1 0.000 0 1 1 0 5962.500 3637.500 4875 2775 4575 3675 4875 4500
+ 0 0 1.00 60.00 120.00
+5 1 0 1 0 0 100 0 -1 0.000 0 1 1 0 4162.500 3637.500 3075 2775 2775 3675 3075 4500
+ 0 0 1.00 60.00 120.00
+5 1 0 1 0 0 100 0 -1 0.000 0 1 1 0 4347.051 3261.376 3075 1875 2475 3450 3000 4575
+ 0 0 1.00 60.00 120.00
+5 1 0 1 0 0 100 0 -1 0.000 0 1 1 0 6147.051 3261.376 4875 1875 4275 3450 4800 4575
+ 0 0 1.00 60.00 120.00
+6 1425 4350 2100 4875
+1 1 0 1 0 0 100 0 5 0.000 1 0.0000 1749 4596 300 225 1749 4596 2049 4821
+4 0 0 100 0 0 20 0.0000 4 195 345 1575 4725 Y1\001
+-6
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1770 3645 300 225 1770 3645 2070 3870
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3420 3645 300 225 3420 3645 3720 3870
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5220 3645 300 225 5220 3645 5520 3870
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1770 2745 300 225 1770 2745 2070 2970
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3420 2745 300 225 3420 2745 3720 2970
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5220 2745 300 225 5220 2745 5520 2970
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5220 1845 300 225 5220 1845 5520 2070
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3420 1845 300 225 3420 1845 3720 2070
+1 1 0 1 0 7 100 0 -1 0.000 1 0.0000 1770 1845 300 225 1770 1845 2070 2070
+1 1 0 1 0 0 100 0 5 0.000 1 0.0000 3399 4596 300 225 3399 4596 3699 4821
+1 1 0 1 0 0 100 0 5 0.000 1 0.0000 5199 4521 300 225 5199 4521 5499 4746
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 1
+ 4200 4200
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2100 3600 3150 3600
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3750 3600 4950 3600
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 1
+ 4200 3300
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2100 2700 3150 2700
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3750 2700 4950 2700
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 0 0 1
+ 4200 2400
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2100 1800 3150 1800
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3750 1800 4950 1800
+2 1 0 1 0 0 100 0 5 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1725 3900 1725 4350
+2 1 0 1 0 0 100 0 5 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5175 3825 5175 4275
+2 1 0 1 0 0 100 0 5 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 3900 3450 4350
+4 0 0 100 0 0 20 0.0000 4 195 345 1575 1950 A1\001
+4 0 0 100 0 0 20 0.0000 4 195 330 1575 2850 B1\001
+4 0 0 100 0 0 20 0.0000 4 195 330 1575 3750 C1\001
+4 0 0 100 0 0 20 0.0000 4 195 345 3225 1950 A2\001
+4 0 0 100 0 0 20 0.0000 4 195 330 3225 2850 B2\001
+4 0 0 100 0 0 20 0.0000 4 195 330 3225 3750 C2\001
+4 0 0 100 0 0 20 0.0000 4 195 345 5025 1950 A3\001
+4 0 0 100 0 0 20 0.0000 4 195 330 5025 2850 B3\001
+4 0 0 100 0 0 20 0.0000 4 195 330 5025 3750 C3\001
+4 0 0 100 0 0 30 0.0000 4 30 525 6375 3000 . . .\001
+4 0 0 100 0 0 20 0.0000 4 195 345 3225 4725 Y2\001
+4 0 0 100 0 0 20 0.0000 4 195 345 5025 4650 Y3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/filter.eps
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/filter.eps Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,9861 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: (ImageMagick)
+%%Title: (filter.eps)
+%%CreationDate: (Tue Nov 16 20:04:33 2004)
+%%BoundingBox: 0 0 657 525
+%%DocumentData: Clean7Bit
+%%LanguageLevel: 1
+%%Pages: 1
+%%EndComments
+
+%%BeginDefaults
+%%EndDefaults
+
+%%BeginProlog
+%
+% Display a color image. The image is displayed in color on
+% Postscript viewers or printers that support color, otherwise
+% it is displayed as grayscale.
+%
+/DirectClassPacket
+{
+ %
+ % Get a DirectClass packet.
+ %
+ % Parameters:
+ % red.
+ % green.
+ % blue.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/DirectClassImage
+{
+ %
+ % Display a DirectClass image.
+ %
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { DirectClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayDirectClassPacket } image
+ } ifelse
+} bind def
+
+/GrayDirectClassPacket
+{
+ %
+ % Get a DirectClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % red
+ % green
+ % blue
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/GrayPseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassImage
+{
+ %
+ % Display a PseudoClass image.
+ %
+ % Parameters:
+ % class: 0-PseudoClass or 1-Grayscale.
+ %
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ class 0 gt
+ {
+ currentfile buffer readline pop
+ token pop /depth exch def pop
+ /grays columns 8 add depth sub depth mul 8 idiv string def
+ columns rows depth
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { currentfile grays readhexstring pop } image
+ }
+ {
+ %
+ % Parameters:
+ % colors: number of colors in the colormap.
+ % colormap: red, green, blue color packets.
+ %
+ currentfile buffer readline pop
+ token pop /colors exch def pop
+ /colors colors 3 mul def
+ /colormap colors string def
+ currentfile colormap readhexstring pop pop
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { PseudoClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayPseudoClassPacket } image
+ } ifelse
+ } ifelse
+} bind def
+
+/DisplayImage
+{
+ %
+ % Display a DirectClass or PseudoClass image.
+ %
+ % Parameters:
+ % x & y translation.
+ % x & y scale.
+ % label pointsize.
+ % image label.
+ % image columns & rows.
+ % class: 0-DirectClass or 1-PseudoClass.
+ % compression: 0-none or 1-RunlengthEncoded.
+ % hex color packets.
+ %
+ gsave
+ /buffer 512 string def
+ /byte 1 string def
+ /color_packet 3 string def
+ /pixels 768 string def
+
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ x y translate
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ currentfile buffer readline pop
+ token pop /pointsize exch def pop
+ /Times-Roman findfont pointsize scalefont setfont
+ x y scale
+ currentfile buffer readline pop
+ token pop /columns exch def
+ token pop /rows exch def pop
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ currentfile buffer readline pop
+ token pop /compression exch def pop
+ class 0 gt { PseudoClassImage } { DirectClassImage } ifelse
+ grestore
+} bind def
+%%EndProlog
+%%Page: 1 1
+%%PageBoundingBox: 0 0 657 525
+userdict begin
+DisplayImage
+0 0
+657 525
+12.000000
+657 525
+1
+1
+1
+8
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff000000000000000000ffffffffffffffff0000ff00000000
+00000000ffffff0000000000ffffffffffffff0000ffffffffffffffff00ffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffffff00000000ffffffff
+ff0000ffffffff00000000ffffffffffff0000ffffffffffffffff0000ffffffffffffff
+ffffff00ffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffff
+ff00000000ffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff
+ffffff000000ffffffff00ffffffffffff00000000ffffffffff00ffffffffffffffffff
+00ffffffffffff00ffffffffff00ffffffffff0000ffffffffffffffffffffffffffffff
+ffffff00ffffffffffffffff0000ffffffffffffffffffffffffffffff00ffffffffff00
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffff0000ffffff00ffffffffffffffff000000ffffffff0000
+ffffffffffffffff00ffffffffffff0000ffffffffff0000ffffffff0000ffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffff0000ffffffffffffffffffffffffff
+ff0000ffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff0000ffffffffffff0000ffff0000ffffffffffffffff
+ff000000ffff0000ffffffffffffffff0000ffffffffffff0000ffffffffffff0000ffff
+ff0000ffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffffff
+ffffffffffffffffffff0000ffffffffffff0000ffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffff00
+00ffffffffffffffffffff0000ffff00ffffffffffffffffff0000ffffffff0000000000
+00ffffffff0000ffffff0000ffffff0000000000ffff000000ffffff0000ffffffffffff
+ffffff0000ffffffffffffff0000ffffff000000000000ffffffff0000ffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+0000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff
+ffffff000000ff0000ffffffffffffffffffffff00000000ffffffffffffffffff0000ff
+ffffffffffff0000ffffffffffff000000ffff0000ffffffff0000ffffffffff00ffffff
+0000ffffffffffffffffffff0000ffffffffffffff000000ffffffff0000ffffffffffff
+000000ffffffffff000000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffff000000ffff0000ffffffffffffffffffffffff000000ffffff
+ffffffffffff0000ffffffffffffff0000ffffffffffffff0000ffff0000ffffffff0000
+ffffffffff00ffffff0000ffffffffffffffffffff0000ffffffffffffff0000ffffffff
+ff0000ffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff00000000000000ffffffff0000ffffffffffffffffff
+ffffff00000000ffffffffffffffff0000ffffffffffffff0000ffffffffffffff0000ff
+ff0000ffffffff000000ffffff0000ffffff0000ffffffffffffffffffff0000ffffffff
+ffffffffffffffffffff0000ffffffffffffff0000ffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000
+ffffffffffffffffffffff0000000000ffffffffffffffff0000ffffffffffffff0000ff
+ffffffffffff0000ffff0000ffffffffff0000ffffff00ffffffff0000ffffffffffffff
+ffffff0000ffffffffffffffffffffffffffff0000ffffffffffffff0000ffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff
+ffffffffffffff0000ffffffffffffffffffffff00ffff000000ffffffffffffff0000ff
+ffffffffffff0000ffffffffffffff0000ffff0000ffffffffff000000ff0000ffffffff
+0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffff
+ff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffff0000ffffffffffffffffffff00ffffffff000000
+ffffffffffff0000ffffffffffffff0000ffffffffffffff0000ffff0000ffffffffffff
+0000ff0000ffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffff
+ff0000ffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff00000000ff0000ffffff000000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffff
+00ffffffffff000000ffffffffffff0000ffffffffffffff0000ffffffffffffff0000ff
+ff0000ffffffffffff00000000ffffffffff0000ffffffffffffffffffff0000ffffffff
+ffffffffffffffffffff0000ffffffffffffff0000ffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff00ffff0000ff0000ffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000
+00ffffffffffffff0000ffffffffffff000000ffffffffff000000ffffffffffff0000ff
+ffffffffffff0000ffff0000ffffffffffffff000000ffffffffff000000ffffffffffff
+ffffff0000ffffffffffffff0000ffffffffff0000ffffffffffffff0000ffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff
+ffffffffffffffff0000ffffffffffff0000ffffffffffffff00000000ffffffffff0000
+ffffffffffff0000ff00ffffffff0000ffffff0000ffffffffffffff0000ffffffffffff
+ff0000ffffffffffffffffff0000ffffffffffffff000000ffffffff0000ff00ffffffff
+0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffff0000ffffffff00ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff000000000000ffffffffffffffff0000ffffffff000000000000ffffffff000000
+00000000ffffff0000ffffffffffffff000000ffffffff0000ffffff0000ffffffffffff
+ffff00ffffffffffffff0000ffffffffffffff00000000000000ffffffff0000ffffffff
+ffff000000ffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffffff00ffffffff0000ffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffff00ffffffff0000ffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffff0000ffffffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00000000000000ff000000ffffffff0000ffff0000000000ff
+ffff000000000000ffff0000ff000000ff000000ffffff00000000000000ffffffffff00
+000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff00ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff
+ffffffff00ffffffffffffffffffffffffffff00ffffffffffffffffffff00ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ffffffff00
+00ffffff0000ffffffff0000ffffff0000ffff000000ffff00ffff0000ffffffff0000ff
+ffff0000ffffff00ffffff000000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+0000ffffffffffffffffffff00ffffffffffffffffffffffffffff0000ffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffff0000ffffffff0000ffffff0000ffffffff0000000000000000ff0000ffffffffffff
+0000ffffffff0000ffffff0000ffffff0000ffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffff0000ffffffffffffffffffffff000000
+00ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffff0000ffffffff0000ffffff0000ffffff0000ffffffffffffff
+ff0000ffffffffffff0000ffffffff0000ffffff0000ffffff0000ffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffff0000ffffffff0000ffffff0000ffffff
+0000ffffffffffffffff0000ffffffffffff0000ffffffff0000ffffff0000ffffff0000
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ffffffff00
+00ffffff0000ffffff000000ffffffffffffff0000ffffffffffff0000ffffffff0000ff
+ffff0000ffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffff0000ffffffff0000ffffff0000ffffffff0000ffffffffff00ff0000ffffffffffff
+0000ffffffff0000ffffff0000ffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffff0000ffffffff0000ffffff0000ffffffff00000000000000ff
+ff0000ffffffffffff0000ffffffff0000ffffff0000ffffff00000000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000000000ffff00000000ffff0000000000ffff000000ff
+ffff0000000000ffff0000000000ffffff00000000ffff00000000ff00000000ffff0000
+0000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffff0000000000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000000000000000000000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff00ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff00ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff00ff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00ff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000
+ffffffffff00000000ffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff00000000ffffffffffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff000000000000000000ffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffff00ffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff000000ffffffffffffffff0000ffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffff00000000ffffffffff0000
+ffffffffffffffffffffffffffffffffffff0000ffffffffff00000000ffffffffffffff
+ffffffffffffffffffffffffffff00ffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffff0000ffffffffff00000000ffffffffffffffffff
+ffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff000000ffffffffffffffff0000ffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff
+000000ffffffff00ffffffffffffffffffffffffffffffffffffff00ffffffffffffffff
+0000ffffffffffffffffffffffffffffff00ffffffffffff00ffffffffffffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff0000
+ffffffffffffffffffffffffffffff00ffffffffffff00ffffffffffffff00ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff000000ffffffffffffff00ffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff0000ffffffffffff0000ffffff00ffffffffffffffffffffffffffffffffffffff00
+ffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffff0000ff
+ffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff00ffff
+ffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffff0000ffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ff
+ffffffffff0000ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff0000ffffffffffff0000ffff0000ffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffffffffff0000
+ffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffff
+ffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000ffffffffff0000ffffffffff0000ffffff000000000000ffffffff
+0000000000ffffffff0000ff000000ffff0000ff00000000ffffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff0000000000ffffffffff0000ff000000ffffffff0000
+0000ffffffffffff0000ffff00000000ffff00000000ffffffffff0000000000ffffff00
+0000000000ff00000000ffffffffffffffffffff0000ffffffffffff0000ffff0000ffff
+ffff000000000000ff00000000ffffff0000ffffffffffffffffff0000ffffffffffffff
+0000ffffff000000000000ffffffffff0000ffffffffffffffffff0000ffffffffffffff
+ff0000000000ffff000000ffffff0000ffffffffffffffffff0000ffffffffffffff0000
+ffffff000000000000ffffffffff0000ffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff000000ffffffffff00ffffffffff000000ffffffff
+ff0000ffffffffff0000ffffff0000ff0000000000000000ffff0000000000000000ffff
+ff000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000000000000000ffffffff
+ffffffffffffffffffffffffffffffffffffffffff000000ffff000000ffff0000000000
+000000ffffff0000ffff0000000000ff0000000000ffff0000000000ffff0000ffff0000
+00ffff000000ffffffff000000ffff0000ffffffffffffffffffffffff0000ffffffffff
+000000ff0000ffffffffffffff000000ffff0000ffffffff0000ffffffffffffffffffff
+0000ffffffffffffff000000ffffffff0000ffffffffffffff000000ffffffffffffffff
+0000ffffffffffffffffff0000ffffffffff00ffffff0000ffffffffffffffffffff0000
+ffffffffffffff000000ffffffff0000ffffffffffffff000000ffffffffff000000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffff0000ffffff
+ffffff0000ffffffffff0000ffffffff0000ffffffff000000ffff0000ffffffffffff00
+0000ffff00000000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffff
+0000ffffffff0000ffffffffffff0000ffffff000000ffffffff000000ffffffff000000
+ffffff0000ffff000000ffffff0000ffffffffff0000ff0000ffffffffffffffffffffff
+ffff0000ffffffff000000ffff0000ffffffffffffffff0000ff0000ffffffffff0000ff
+ffffffffffffffffff0000ffffffffffffff0000ffffffffff0000ffffffffffffffff00
+00ffffffffffffffff0000ffffffffffffffffff0000ffffffffff00ffffff0000ffffff
+ffffffffffffff0000ffffffffffffff0000ffffffffff0000ffffffffffffffff0000ff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+00ffffff00ffffffffffffff0000ffffffffff0000ffffffff000000000000000000ffff
+0000ffffffffffff0000ffffffff000000ffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff00ffffffff0000ffffffff0000ffffffffffff0000ffffffff0000ffffffff00
+00ffffffffff0000ffffffff0000ffffff00ffffffff0000ffffffffffff000000ffffff
+ffffffffffffffffffffff00000000000000ffffffff0000ffffffffffffffffff000000
+ffffffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000
+ffffffffffffffff0000ffffffffffffffff0000ffffffffffffffffff000000ffffff00
+00ffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffff
+ffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff000000ff0000ffffffffffffff0000ffffffffff0000ffffffff0000
+ffffffffffffffffff0000ffffffffffff0000ffffffffff0000ffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00000000ffffffff0000ffffffffffff0000ffff
+ffff0000ffffffff0000ffffffffff0000ffffffff0000ffffffffffff00000000ffffff
+ffffff000000ffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffff
+ffffffffffff000000ffffffffffff0000ffffffffffffffffffff0000ffffffffffffff
+ffffffffffffff0000ffffffffffffffff0000ffffffffffffffff0000ffffffffffffff
+ffffff0000ffffff00ffffffff0000ffffffffffffffffffff0000ffffffffffffffffff
+ffffffffff0000ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff000000ff0000ffffffffffffff0000ffffffff
+ff0000ffffff000000ffffffffffffffffff0000ffffffffffff0000ffffffffff0000ff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffff0000ffffffff0000ff
+ffffffffff0000ffffffff0000ffffffff0000ffffffffff0000ffffffff0000ffffffff
+0000ffff0000ffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffff
+ffffffff0000ffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffff
+0000ffffffffffffffffffffffffffff0000ffffffffffffffff0000ffffffffffffffff
+0000ffffffffffffffffffff000000ff0000ffffffff0000ffffffffffffffffffff0000
+ffffffffffffffffffffffffffff0000ffffffffffffffff0000ffffffffffff0000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff
+ffffff0000ffffffffff0000ffffff000000ffffffffffffffffff0000ffffffffffff00
+00ffffffffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+0000ffffffff0000ffffffffffffff0000ffff0000ffffffffff0000ffffffffff0000ff
+ffffff0000ffffff0000ffffff0000ffffffffffff00000000ffffffffffffffffffffff
+ffff0000ffffffffffffffffff0000ffffffffffffffffff00000000ffffffffff0000ff
+ffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffffffff00
+00ffffffffffffffff0000ffffffffffffffffffffff0000ff0000ffffffff0000ffffff
+ffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffffffff0000ff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00000000ffffffffffffffff0000ffffffffff0000ffffffff0000ffffffffffff00ffff
+0000ffffffffffff0000ffffffffff0000ffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffff0000ffffffff0000ffffffffffffff0000000000ffffffffffff00
+00ffffffffff0000ffffffff0000ffff0000ffffffff0000ffffffffff0000ff000000ff
+ffffffffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffff0000ff00
+0000ffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000
+ffffffffffffffff0000ffffffffffffffff0000ffffffffffffffffffffff00000000ff
+ffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffff
+ffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffff0000ffffffffff0000ffffffff0000
+00ffffffff0000ffff0000ffffffffffff0000ffffffff0000ffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffff0000ffffffff0000ffffffffffff0000ffff
+ffffffffffffffff0000ffffffffff0000ffffffff0000ffff0000ffffffff0000ffffff
+ff0000ffffff0000ffffffffffffffffffffffff0000ffffffffffffffffff000000ffff
+ffffffff0000ffffff0000ffffffff000000ffffffffffffffffff0000ffffffffffffff
+0000ffffffffff0000ffffffffffffffff0000ffffffffffffffff0000ffffffffffffff
+ffffffffff000000ffffffffff000000ffffffffffffffffff0000ffffffffffffff0000
+ffffffffff0000ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffffff
+ff0000ff00ffffff00000000000000ffffff0000ffffffffffff0000ffffffff00ffffff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000ffff00000000ffffffff0000ff
+ffffffffff00000000000000ffffffffff0000ffffffffff0000ffffffff0000ffff0000
+ffff00000000ffffffff00ffffffff000000ffffffffffffffffffffff0000ffffffffff
+ffffffffff0000ffffffffffff00ffffffff000000ffffffff0000ffffffffffffffffff
+0000ffffffffffffff000000ffffffff0000ff00ffffffffff0000ffffffffffffffffff
+0000ffffffffffffffffffffffff0000ffffffffffffff0000ffffffffffffffffff0000
+ffffffffffffff000000ffffffff0000ff00ffffffffff0000ffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffff0000000000ffffffff000000ffffffff0000000000ffff000000000000ffffffffff
+0000000000ffffffffff0000000000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff
+00000000000000000000ffffffff0000000000000000ffff000000000000ff0000000000
+00ff0000000000ff00000000ff00000000ff000000ffff000000000000ffffffffffffff
+000000000000ffffffffffffffff0000ffffffffff000000ffff000000000000ffff0000
+ffffffffffffff00000000000000ffffffff0000ffffffffffff000000ffffffffff0000
+ffffffffffffffffff0000ffffffffffffffffffffffffff00ffffffffffffff0000ffff
+ffffffffff00000000000000ffffffff0000ffffffffffff000000ffffffffff0000ffff
+ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ff000000000000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff0000ffff
+ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff
+ffffffffff00ffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffffffffffff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff000000ffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00000000000000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffffffff00000000ffffffffff
+ffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffff0000ffffffffffffffffffffffffffffff00ffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff00ffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffffffffff
+0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff000000000000ff00000000ffffff0000ffffffffffffffffff0000ffffffffff
+ffff0000ffffff000000000000ffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff000000ffff0000ffffffff0000ffffffffffffffff
+ffff0000ffffffffffffff000000ffffffff0000ffffffffffff000000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ff0000ffffffffff00
+00ffffffffffffffffffff0000ffffffffffffff0000ffffffffff0000ffffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+0000ffffffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff
+0000ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff000000ffffffffffff0000ffffffffffffffffffff0000ffffffffff
+ffffffffffffffffff0000ffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffffffffffffffff
+ffff0000ffffffffffffffffffffffffffff0000ffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff00
+00ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ff000000ffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff
+0000ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff0000ffffff0000ffffffff000000ffffffffffffffffff0000ffffffffff
+ffff0000ffffffffff0000ffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff00ffffffff000000ffffffff0000ffffffffffffff
+ffff0000ffffffffffffff000000ffffffff0000ff00ffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff000000ffff000000000000ffff
+0000ffffffffffffff00000000000000ffffffff0000ffffffffffff000000ffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00ff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff00ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff00ff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff000000000000000000ffffffffffffffff0000ff00
+00000000000000ffffff0000000000ffffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff000000ffffffffffffffffffffffffffff000000
+ffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff0000ffffffffff
+ffffffffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffff00ffffffffffffff00ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff00000000ff
+ffffffff0000ffffffff00000000ffffffffffff0000ffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff00ffffffff
+ffff0000ffffffffffffffffffffffffffffffffff0000ffffffffff00000000ffffffff
+ffffffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffff000000ffffffff00ffffffffffff00000000ffffffffff00ffffffffffff
+ffffff00ffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffff0000ffffffffff00ffffffffffffffffffffffff
+ffffffffff00ffffffffff0000ffffffffffffffffffffffffffffffffff00ffffffffff
+ffffff0000ffffffffffffffffffffffffffffff00ffffffffffff00ffffffffffffff00
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffff0000ffffff00ffffffffffffffff000000ffffff
+ff0000ffffffffffffffff00ffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffff0000ffffffff0000ffffff
+ffffffffffffffffffffffffffff0000ffffffff0000ffffffffffffffffffffffffffff
+ffff00ffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffff
+0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffff0000ffffffffff
+ffffffff000000ffff0000ffffffffffffffff0000ffffffffffff0000ffffffffffffff
+ff0000ffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff0000
+ffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffff0000ffffffffff
+ffffffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffffffff
+ff0000ffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000
+ffff0000ffffffffffffffffffff0000ffff00ffffffffffffffffff0000ffffffff0000
+00000000ffffffffffff0000ffffffffffffffffffff000000ff0000ffffffffff000000
+0000ffffffffff0000ffff000000000000ffffffff0000000000ffffffffffffff0000ff
+ffff0000ffff0000000000ffff000000ffffff0000ffffffffffffffffff0000ffffffff
+ffffff0000ffffff000000000000ffffffffff0000ffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffff000000ff0000ffffffffffffffffffffff00000000ffffffffffffffffff
+0000ffffffffffffff0000ffffffffffffffff0000ffffffffffffffff0000ffffff0000
+00ffffffff0000ffffff0000ffffffff0000ffffffff0000ffffffff000000ffff000000
+ffffffffffff000000ffff0000ffffff0000ffffffffff00ffffff0000ffffffffffffff
+ffffff0000ffffffffffffff000000ffffffff0000ffffffffffffff000000ffffffffff
+000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffff000000ffff0000ffffffffffffffffffffffff000000
+ffffffffffffffffff0000ffffffffffffff0000ffffffffffffffff0000ffffffffffff
+ffff0000ffffffff0000ffffff0000ffffffff000000ffffff0000ffffffff0000ffffff
+ff000000ffffff0000ffffffffffffff0000ffff0000ffffff0000ffffffffff00ffffff
+0000ffffffffffffffffffff0000ffffffffffffff0000ffffffffff0000ffffffffffff
+ffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff00000000000000ffffffff0000ffffffffffff
+ffffffffffff00000000ffffffffffffffff0000ffffffffffffff0000ffffffffffffff
+ff0000ffffffffffffff0000ffffffffff0000ffffff000000000000000000ffffff0000
+ffffffff0000ffffffffff00ffffffff0000ffffffffffffff0000ffff0000ffffff0000
+00ffffff0000ffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffff
+ff0000ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ff0000ffffffffffffffffffffff0000000000ffffffffffffffff0000ffffffffffffff
+0000ffff00000000000000000000000000ffff0000ffffffffff0000ffffff0000ffffff
+ffffffffffffff0000ffffffff0000ffffffffffffffff00000000ffffffffffffff0000
+ffff0000ffffffff0000ffffff00ffffffff0000ffffffffffffffffffff0000ffffffff
+ffffffffffffffffffff0000ffffffffffffffff0000ffffffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffff0000ffffffffffffffffffffff00ffff000000ffffffffffffff
+0000ffffffffffffff0000ffff00000000000000000000000000ffff0000ffffffffff00
+00ffff000000ffffffffffffffffffff0000ffffffff0000ffffffffffff0000ffff0000
+ffffffffffffff0000ffff0000ffffffff000000ff0000ffffffff0000ffffffffffffff
+ffffff0000ffffffffffffffffffffffffffff0000ffffffffffffffff0000ffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000ffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffff00ffffffff
+000000ffffffffffff0000ffffffffffffff0000ffffffffffffffff0000ffffffffffff
+ff0000ffffffffff0000ffff000000ffffffffffffffffffff0000ffffffff0000ffffff
+ffff0000ffffff0000ffffffffffffff0000ffff0000ffffffffff0000ff0000ffffffff
+0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffff
+ffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff000000ffffff0000ffffffffffffffffffffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffffffffff
+ffffff00ffffffffff000000ffffffffffff0000ffffffffffffff0000ffffffffffffff
+ff0000ffffffffffffff0000ffffffffff0000ffffff0000ffffffffffff00ffffff0000
+ffffffff0000ffffffff0000ffffffff0000ffffffffffffff0000ffff0000ffffffffff
+00000000ffffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffff
+ff0000ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffff0000ffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ff000000ffffffffffffff0000ffffffffffff000000ffffffffff000000ffffffffffff
+0000ffffffffffffffff0000ffffffffffffff000000ffffffff0000ffffff000000ffff
+ffff0000ffffff0000ffffffff0000ffffffff0000ffffffff0000ffffffffffffff0000
+ffff0000ffffffffffff000000ffffffffff000000ffffffffffffffffff0000ffffffff
+ffffff0000ffffffffff0000ffffffffffffffff0000ffffffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffff0000ffffffffffff0000ffffffffffffff00000000ffffffff
+ff0000ffffffffffff0000ff00ffffffffffff0000ffffffffffffffff000000ffff0000
+000000ffff00000000000000ffffffff0000ffffffff0000ff00ffff0000ffff00000000
+ffffffffffff0000ffffff0000ffffffffffff0000ffffffffffffff0000ffffffffffff
+ffffff0000ffffffffffffff000000ffffffff0000ff00ffffffffff0000ffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000000000ffffffffffffffff0000ffffffff000000000000ffffffff
+00000000000000ffffff0000ffffffffffffff000000ffffffffffff0000ffffffffffff
+ffffff0000000000000000ffffffff0000000000ffffffff0000000000ffffff000000ff
+ffff00000000ff00000000ffffffff0000ffffff0000ffffffffffffff00ffffffffffff
+ff0000ffffffffffffff00000000000000ffffffff0000ffffffffffff000000ffffffff
+ff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffff00ffffffffffffffffffffffff0000ffffffffffff00
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffff
+ff0000ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ff00000000ffffffff0000ff0000
+00ff000000000000ffffffffff000000000000ffff000000ffffffff000000000000ff00
+00000000ffff000000ffffffffff0000000000ffffffff00000000000000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffff
+ffffffffffffffffffff00ffffffffffffffffffff00ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffff00
+00ffffff000000ffff000000ffffff0000ffffff0000ffffffff0000ffffff0000ffffff
+0000ffffff0000ffff0000ffffffffff0000ffffffff0000ffff000000ffffffff0000ff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff00ffffffffffffffffffffffffff0000ffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff0000ffffffff000000ffff0000ffffffff0000000000000000ffff00ffffffffff00
+00ffffff0000ffffff00ffffffffff00ffff0000ffffffffff0000ffffff0000ffffffff
+000000ffffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffff00000000ffffffffffffffff
+ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff0000ffffffffff0000ffff0000ffffff0000ffffffffffffff
+ff0000ffffffffff0000ffffff0000ffff0000ffffffffffffffff0000ffffffffff0000
+ffffff0000ffffffffff0000ffffff0000ffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ffff0000ffffff
+0000ffffffffffffffff0000ffffffffff0000ffffff0000ffff0000ffffffffffffffff
+0000ffffffffff0000ffffff0000ffffffffff0000ffffff0000ffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff
+0000ffff0000ffffff000000ffffffffffffff0000ffffffffff0000ffffff0000ffff00
+0000ffffffffffffff0000ffffffffff0000ffffff0000ffffffffff0000ffffff0000ff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff0000ffffffffff00ffffff0000ffffffff0000ffffffffff00ff000000ffffffff00
+00ffffff0000ffff00000000ffffff0000ff0000ffffffffff0000ffffff0000ffffffff
+ff00ffffffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff000000ffffff0000ffffff0000ffffffff00000000000000ff
+ffff000000ffffff000000ffff0000ffffff00000000000000ffff0000ffffffffff0000
+ffffffff0000ffffff0000ffffffff0000ffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff000000000000ffffffff0000000000ff
+ffff0000000000ffffffffff00000000ff000000ff00000000ffffff0000000000ffffff
+ff000000ffff00000000ffffffff00000000ffffffffff00000000ff00000000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffffffffffffff
+ffff000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffff0000ffffff00ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffff0000ffff0000
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff000000000000ffffff00000000ff
+ffffff0000ff00000000ffffff00000000ffffffffffffffffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff0000
+00ffff00ffffff0000ffffff0000ffff0000ffffff0000ffff0000ffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff0000ffffffff0000ffff000000000000ffffff0000ffff0000ffffff00ffffff0000
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff0000ffffffff0000ff0000ffffffffffffffff0000ffff0000
+ffffffffff00000000ffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000ffffffff0000ff0000ffffffffff
+ffffff0000ffff0000ffffffff00ffff0000ffffffffffffffffff00ff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffff00
+00ff0000ffffffffff00ffff0000ffff0000ffffff0000ffff0000ffffffffffffffffff
+00ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffff0000ffff0000ffffff00ffffff0000ffff0000ffff000000ff000000
+ffffffffffffffffff00ff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff000000000000ffffff00000000ffffff000000ffff0000
+0000ff00000000000000ffffffffffffffff00ff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000
+ffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff000000000000000000000000000000000000000000000000000000000000000000
+0000000000ffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffff00000000ffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff000000000000000000ffffffffffffffff0000ff0000000000000000ff
+ffff0000000000ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffff
+ffffffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffff00000000ffffffffff0000ffff
+ffff00000000ffffffffffff0000ffffffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff00ffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffff
+ffffff00000000ffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000
+00ffffffff00ffffffffffff00000000ffffffffff00ffffffffffffffffff00ffffffff
+ffff00ffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff00ffffffffff0000ffffffffffffffffffffffffff
+ffffffffff00ffffffffffffffff0000ffffffffffffffffffffffffffffff00ffffffff
+ff00ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+0000ffffffffffff0000ffffff00ffffffffffffffff000000ffffffff0000ffffffffff
+ffffff00ffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff0000ffffffff
+ffffffffffffffffffffffffff00ffffffffffffffffff0000ffffffffffffffffffffff
+ffffff0000ffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffff0000ffff0000ffffffffffffffffff000000ff
+ff0000ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffff0000ffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffff
+ffffffffffffffffffffffff0000ffffffffffff0000ffffffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffff0000ffffffff
+ffffffffffff0000ffff00ffffffffffffffffff0000ffffffff000000000000ffffffff
+ffffffffffffffffffff000000000000ffffffff0000000000ffff00000000ffff000000
+00ffffffffffff0000ffffff0000ffffff0000000000ffff000000ffffff0000ffffffff
+ffffffffff0000ffffffffffffff0000ffffff000000000000ffffffff0000ffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000
+00ff0000ffffffffffffffffffffff00000000ffffffffffffffffff0000ffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffff0000ffffffff000000ffff000000ff
+ffff0000ffffffff0000ffffffffffff000000ffff0000ffffffff0000ffffffffff00ff
+ffff0000ffffffffffffffffffff0000ffffffffffffff000000ffffffff0000ffffffff
+ffff000000ffffffffff000000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+0000ffffffff000000ffff0000ffffffffffffffffffffffff000000ffffffffffffffff
+ff0000ffffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffff
+000000ffffff0000ffffff0000ffffffff0000ffffffffffffff0000ffff0000ffffffff
+0000ffffffffff00ffffff0000ffffffffffffffffffff0000ffffffffffffff0000ffff
+ffffff0000ffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff00000000000000ffffffff0000ffffffffffffffffffffffff0000
+0000ffffffffffffffff0000ffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffff0000ffffffffff00ffffffff0000ffffff0000ffffffff0000ffffffffffffff00
+00ffff0000ffffffff000000ffffff0000ffffff0000ffffffffffffffffffff0000ffff
+ffffffffffffffffffffffff0000ffffffffffffff0000ffffffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffffffff
+ffffffffffff0000000000ffffffffffffffff0000ffffffffffffff0000ffff00000000
+000000000000000000ffffff0000ffffffffffffffff00000000ffffff0000ffffffff00
+00ffffffffffffff0000ffff0000ffffffffff0000ffffff00ffffffff0000ffffffffff
+ffffffffff0000ffffffffffffffffffffffffffff0000ffffffffffffff0000ffffffff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffff0000ffffffffffffffffffffff00ffff000000ffffffffffffff0000ffffffffffff
+ff0000ffff00000000000000000000000000ffffff0000ffffffffffff0000ffff0000ff
+ffff0000ffffffff0000ffffffffffffff0000ffff0000ffffffffff000000ff0000ffff
+ffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffff0000ffffffff
+ffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+0000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffff
+0000ffffffffffffffffff0000ffffffffffffffffffff00ffffffff000000ffffffffff
+ff0000ffffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffff
+ff0000ffffff0000ffffff0000ffffffff0000ffffffffffffff0000ffff0000ffffffff
+ffff0000ff0000ffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffff
+ffffff0000ffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffff00ffffffff
+ff000000ffffffffffff0000ffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffff0000ffffffff0000ffffffff0000ffffff0000ffffffff0000ffffffffffffff00
+00ffff0000ffffffffffff00000000ffffffffff0000ffffffffffffffffffff0000ffff
+ffffffffffffffffffffffff0000ffffffffffffff0000ffffffffffff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff000000ffffffff
+ffffff0000ffffffffffff000000ffffffffff000000ffffffffffff0000ffffffffffff
+ffffffffffffffffffffffff0000ffffffff0000ffffffff0000ffffff0000ffffffff00
+00ffffffffffffff0000ffff0000ffffffffffffff000000ffffffffff000000ffffffff
+ffffffffff0000ffffffffffffff0000ffffffffff0000ffffffffffffff0000ffffffff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffffff0000ffffffffffff0000ffffffffffffff00000000ffffffffff0000ffffffffff
+ff0000ff00ffffffffffffffffffffffffffffffff0000ff00ffff0000ffff00000000ff
+ffff000000ff0000000000ffffffffff0000ffffff0000ffffffffffffff0000ffffffff
+ffffff0000ffffffffffffffffff0000ffffffffffffff000000ffffffff0000ff00ffff
+ffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+00000000ffffffffffffffff0000ffffffff000000000000ffffffff00000000000000ff
+ffff0000ffffffffffffff000000ffffffffffffffffffffffffffffffffff000000ffff
+ff00000000ff00000000ffff00000000ff000000ffffffffff0000ffffff0000ffffffff
+ffffffff00ffffffffffffff0000ffffffffffffff00000000000000ffffffff0000ffff
+ffffffff000000ffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffffffffffff0000ffffffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff00000000ff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff000000ffffffffffffffffffffffffffff000000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffff
+ffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff00ffffffffffffffffffffffffffff00ffffffffffffffffffff00ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00ffff0000ff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff0000ffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffff00
+000000ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff0000ffffffffffffffffffffffffffffff0000ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff000000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffff00ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff0000ffff
+ffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00000000000000ff000000ffff000000
+0000ff000000ffffff000000000000ffffffffff000000000000ffffffffffffffffffff
+ffffffffff0000ffffffff0000000000ffffffffff00000000000000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffff0000ffffffffff0000ff00ffffffff0000ffffff0000ffffff0000ffffffff0000ff
+ffffffffffffffffffffffffffff0000ffffff0000ffffff0000ffffff00ffffff000000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffff0000ffffffffff000000ffffffffff0000000000000000ffff
+00ffffffffff0000ffffffffffffffffffffffffffffff0000ffffff00ffffffff0000ff
+ffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffff0000ffffffffffff0000ffffffff0000
+ffffffffffffffff0000ffffffffff0000ffff00000000000000000000ffffff0000ffff
+ffffffff00000000ffffff0000ffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ffffffffff
+ff0000ffffffff0000ffffffffffffffff0000ffffffffff0000ffffffffffffffffffff
+ffffffffff0000ffffffff0000ffff0000ffffff0000ffff0000ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffff0000ffffffffff00ff0000ffffff000000ffffffffffffff0000ffffffffff0000ff
+ffffffffffffffffffffffffffff0000ffffff0000ffffff0000ffffffff00000000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffff0000ffffffffff00ffff0000ffffff0000ffffffffff00ff00
+0000ffffffff0000ffffffffffffffffffffffffffffff0000ffffff0000ffffff0000ff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffff0000ffffffff00ffffff000000ffff00
+000000000000ffffff000000ffffff000000ffffffffffffffffffffffffffff0000ffff
+ff0000ffff000000ffffff00000000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000000000ffff00000000ff000000
+00ffff00000000ffff0000000000ffffffffff00000000ff000000ffffffffffffffffff
+ffffffff0000000000ff00000000ff000000ffff00000000000000ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffff00000000
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff00ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffff00ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff000000000000ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+0000ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00ffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+0000ffffffffffffffffffffff00ffffffff0000ffffffffffffffffffffffff00ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff
+ffff000000ff00000000ff00000000ffffffffffff0000000000ffffffffff0000000000
+ffffff0000000000ff0000ff00000000ffffffff000000ffffff00000000000000ffffff
+ffff00000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffffffffffffffffffffffff00000000ffffff00000000ffffff0000ffff000000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff00ffffff0000ffffff0000ffffff0000ffffffff0000ffffffff0000ffff000000
+ffffff0000ffff000000ffffff0000ffffff000000ffffff0000ffffffff0000ffffffff
+0000ffffff0000ffffff00ffffff000000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffff0000ffffff0000ffff0000ffff
+0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffff0000ffffff0000ffffffff0000ffff
+ff0000ffffffff000000ff0000ffffffff000000ffff0000ffffff0000ffffffff0000ff
+ffffff0000ffffffff0000ffffff0000ffffff0000ffffff0000ffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffff0000ffff
+ff00ffffff0000ffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff000000ffffffffff0000ffffff00
+00ffffffff0000ffffff0000ffffffffff0000ff0000ffffffffff0000ffff0000ffffff
+0000ffffffff0000ffffffff0000ffffffff0000ffffff0000ffffff0000ffffff0000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffff00000000ffff0000ffffff0000ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ff
+ffffff0000ffffff0000ffffffff0000ffffff0000ffffffffff0000ff0000ffffffffff
+0000ffff0000ffffff0000ffffffff0000ffffffff0000ffffffff0000ffffff0000ffff
+ff0000ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff0000ffffffffff
+ffffffffffffffffffffffffffff0000ffffffff00ffff0000ffff0000ffffff0000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000ffffff0000ffffff0000ffffffff0000ffffff0000ffffffffff00
+00ff0000ffffffffff0000ffff0000ffffff0000ffffffff0000ffffffff0000ffffffff
+0000ffffff0000ffffffff00000000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ff0000ffffffffffffffffffffffffffffffffffffff0000ffffff0000ffff0000ffff
+0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff00ffffffff00ffffff0000ffffff0000ffffffff0000ffff
+ff0000ffffffffff00ffff0000ffffffffff00ffffff0000ffffff0000ffffffff0000ff
+ffffff0000ffffffff0000ffffff0000ffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff00ff0000ffffffffffffffffffffffffffffffffffffff0000ffff
+000000ff000000ffff0000ffff000000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff00ffffffff00ffffff0000ffffff00
+00ffffffff0000ffffffff0000ffffff0000ffffff0000ffffff0000ffffff0000ffffff
+0000ffffffff0000ffffffff0000ffffffff0000ffffff0000ffffff00000000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff00ff0000ffffffffffffffffffffffffffff
+ffffffffff00000000ff00000000000000ffff000000ff0000ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ff
+ff0000000000ff0000000000ff00000000ffffffff00000000ffffffffffff00000000ff
+ffffffffff0000000000000000ff00000000ffff00000000ffff00000000ff00000000ff
+ff00000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff00ffff0000000000ffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffff0000000000000000000000000000
+000000000000000000000000000000000000000000000000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffff000000
+00ffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff00ffffff0000ffffffff00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff0000000000ffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000ff
+ffffffffffffff0000ff0000000000000000ffffff0000000000ffffffffffffff0000ff
+ffffffffffffff00ffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+0000ffffffffffff0000ffffffffffffffffffffffff0000000000000000000000000000
+ff00ffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffff00000000ffffffffff0000ffffffff00000000ffffffffffff0000ffffff
+ffffffffff0000ffffffffffffffffffff00ffffffffffff0000ffffffffffffffffffff
+ffffffffffffffff0000ffffffffff00000000ffffffffffffffffffffffff0000ffffff
+ff0000ffffffff0000ffff00ffffffffffffff00ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff0000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff000000ffffffffffff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff000000ffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffff000000ffffffff00ffffffffffff00000000ff
+ffffffff00ffffffffffffffffff00ffffffffffff00ffffffffff00ffffffffff0000ff
+ffffffffffffffffffffffffffffffffff00ffffffffffffffff0000ffffffffffffffff
+ffffffff0000ffffffff0000ffffffffff00ffffff00ffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ffff0000ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ffffff00ffffff
+ffffffffff000000ffffffff0000ffffffffffffffff00ffffffffffff0000ffffffffff
+0000ffffffff0000ffffffffffffffffffffffffffffffffff00ffffffffffffffffff00
+00ffffffffffffffffffffffff00ffffffffff0000ffffffffff00ffffff0000ffffffff
+ffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+0000ffff0000ffffffffffffffffff000000ffff0000ffffffffffffffff0000ffffffff
+ffff0000ffffffffffff0000ffffff0000ffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffff0000ffff0000ffffffffffffffffffff0000ffff00ffffffffffff
+ffffff0000ffffffff000000000000ffffffff0000ffffff0000ffffff0000000000ffff
+000000ffffff0000ffffffffffffffffff0000ffffffffffffff0000ffffffffffffffff
+ff0000ffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffff00ffffffff0000ffffffffffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffff000000ff0000ffffffffffffffffffffff0000
+0000ffffffffffffffffff0000ffffffffffffff0000ffffffffffff000000ffff0000ff
+ffffff0000ffffffffff00ffffff0000ffffffffffffffffffff0000ffffffffffffff00
+0000ffffffffffffffff0000ffffffffffffffffffff000000ffffffffff000000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000
+00000000ff000000ffff0000000000ff000000ffffff000000000000ffffffffff000000
+000000ffffffffffffff000000ffffff00000000000000ffffff0000000000ffffff0000
+00000000ffff0000ff0000000000000000ffff000000ffff0000000000ffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffff000000ffff0000ffffffff
+ffffffffffffffff000000ffffffffffffffffff0000ffffffffffffff0000ffffffffff
+ffff0000ffff0000ffffffff0000ffffffffff00ffffff0000ffffffffffffffffffff00
+00ffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffff0000ffff
+ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffff0000ffffffffff0000ff00ffffffff0000ffffff00
+00ffffff0000ffffffff0000ffffffffffffffff0000ffffffff0000ffffff0000ffffff
+0000ffffffff0000ffffff0000ffff000000ffff00ffff0000ffffffff00ffff0000ffff
+ff0000ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000ff
+ffffff0000ffffffffffffffffffffffff00000000ffffffffffffffff0000ffffffffff
+ffff0000ffffffffffffff0000ffff0000ffffffff000000ffffff0000ffffff0000ffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffff0000ffffffffff000000ffff
+ffffff0000000000000000ffff00ffffffffff0000ffffffffffffffff0000ffffffff00
+00ffffff0000ffffff0000ffffffff0000000000000000ff0000ffffffffffff0000ffff
+ff0000ffff00ffffffff0000ffffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffff0000ffffffffffffffffffffff0000000000ffffffffffff
+ffff0000ffffffffffffff0000ffffffffffffff0000ffff0000ffffffffff0000ffffff
+00ffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ff
+ffffffffff0000ffffffff0000ffffffffffffffff0000ffffffffff0000ffffffffffff
+ffff0000ffffffff0000ffffff0000ffffff0000ffffff0000ffffffffffffffff0000ff
+ffffffffffff0000ffff00ffffffffffff00000000ffffffff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffff0000ffffffffffffffffffffff00ff
+ff000000ffffffffffffff0000ffffffffffffff0000ffffffffffffff0000ffff0000ff
+ffffffff000000ff0000ffffffff0000ffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffff0000ffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffff0000ffffffffffff0000ffffffff0000ffffffffffffffff0000ffffffff
+ff0000ffffffffffffffff0000ffffffff0000ffffff0000ffffff0000ffffff0000ffff
+ffffffffffff0000ffffffffffffff0000ff0000ffffffff0000ffff0000ffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffff0000ffffffff
+ffffffffffff00ffffffff000000ffffffffffff0000ffffffffffffff0000ffffffffff
+ffff0000ffff0000ffffffffffff0000ff0000ffffffff0000ffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffff0000ffff
+ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffff0000ffffffffff00ff0000ffffff000000ffffffff
+ffffff0000ffffffffff0000ffffffffffffffff0000ffffffff0000ffffff0000ffffff
+0000ffffff000000ffffffffffffff0000ffffffffffffffff000000ffffffff0000ffff
+ff0000ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffff0000ffffffffffffffffff00ffffffffff000000ffffffffffff0000ffffffffff
+ffff0000ffffffffffffff0000ffff0000ffffffffffff00000000ffffffffff0000ffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffff0000ffffffffff00ffff0000
+ffffff0000ffffffffff00ff000000ffffffff0000ffffffffffffffff0000ffffffff00
+00ffffff0000ffffff0000ffffffff0000ffffffffff00ff0000ffffffffffffffff0000
+00ffffffff0000ffffff0000ffffffff0000ffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffff000000ffffffffffffff0000ffffffffffff000000ffffff
+ffff000000ffffffffffff0000ffffffffffffff0000ffff0000ffffffffffffff000000
+ffffffffff000000ffffffffffffffffff0000ffffffffffffff0000ffffffffffffffff
+ff0000ffffffffffffffffffffff0000ffffffffffff0000ffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ff
+ffffff00ffffff000000ffff00000000000000ffffff000000ffffff000000ffffffffff
+ffff0000ffffffff0000ffffff0000ffffff0000ffffffff00000000000000ffff0000ff
+ffffffffffffff000000ffffffff0000ffff000000ffffffff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffff0000ffffffffffff0000ffffffff
+ffffff00000000ffffffffff0000ffffffffffff0000ff00ffffffff0000ffffff0000ff
+ffffffffffff0000ffffffffffffff0000ffffffffffffffffff0000ffffffffffffff00
+0000ffffffffffffffff000000ffffffffffffffffff0000ffffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+000000ffff00000000ff00000000ffff00000000ffff0000000000ffffffffff00000000
+ff000000ffffffffffff00000000ffff00000000ff00000000ffffff000000ffffff0000
+000000ffff0000000000ffffffffffffff00ffffffffff00000000ff000000ffff000000
+0000ffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffff
+ffffffffffffffffffffffffffffffffff000000000000ffffffffffffffff0000ffffff
+ff000000000000ffffffff00000000000000ffffff0000ffffffffffffff000000ffffff
+ff0000ffffff0000ffffffffffffffff00ffffffffffffff0000ffffffffffffff000000
+00000000ffffffff0000ffffffffffffff00000000000000ffffffffffffff0000ffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffff0000ffffffffffffffffffffffffff0000ffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff
+ffffffffff00ffffffffffffffffffffffff00ffffffffffffffffffffffffffff00ffff
+ffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffff00ffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff
+ffffffffffffffffffffffffffff0000ffffffffffffffffffff00ffffffffffffffffff
+ffffffffff0000ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffff007f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f00ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff0000
+ffffffffffffffffffffff00000000ffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff000000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffff000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffffffffffff0000ffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffff0000ffffffffffffffffffffff00ffffffff0000ffffff
+ffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff00000000ffffff000000ff00000000ff00000000ffffffffffff0000
+000000ffffffffff0000000000ffffff0000000000ff0000ff00000000ffffffff000000
+ffffff00000000000000ffffffffff00000000000000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff00ffffff0000ffffff0000ffffff0000ffffffff
+0000ffffffff0000ffff000000ffffff0000ffff000000ffffff0000ffffff000000ffff
+ff0000ffffffff0000ffffffff0000ffffff0000ffffff00ffffff000000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff0000
+ffffff0000ffffffff0000ffffff0000ffffffff000000ff0000ffffffff000000ffff00
+00ffffff0000ffffffff0000ffffffff0000ffffffff0000ffffff0000ffffff0000ffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+0000ffffffffff0000ffffff0000ffffffff0000ffffff0000ffffffffff0000ff0000ff
+ffffffff0000ffff0000ffffff0000ffffffff0000ffffffff0000ffffffff0000ffffff
+0000ffffff0000ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff000000ffffffff0000ffffff0000ffffffff0000ffffff0000ffff
+ffffff0000ff0000ffffffffff0000ffff0000ffffff0000ffffffff0000ffffffff0000
+ffffffff0000ffffff0000ffffff0000ffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff00ff0000ffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff000000ffffff0000ffffff0000ffffffff
+0000ffffff0000ffffffffff0000ff0000ffffffffff0000ffff0000ffffff0000ffffff
+ff0000ffffffff0000ffffffff0000ffffff0000ffffffff00000000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff0000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff00ffffff0000
+ffffff0000ffffffff0000ffffff0000ffffffffff00ffff0000ffffffffff00ffffff00
+00ffffff0000ffffffff0000ffffffff0000ffffffff0000ffffff0000ffffff0000ffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+00ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffff00ffffff0000ffffff0000ffffffff0000ffffffff0000ffffff0000ffffff0000
+ffffff0000ffffff0000ffffff0000ffffffff0000ffffffff0000ffffffff0000ffffff
+0000ffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff00ff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000000000ffff0000000000ff0000000000ff00000000ffffffff0000
+0000ffffffffffff00000000ffffffffffff0000000000000000ff00000000ffff000000
+00ffff00000000ff00000000ffff00000000000000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffff00ffff0000000000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff
+ffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffff0000ffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffff000000000000ffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffff00000000ffffff0000
+0000000000ffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff00ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff00
+ffff0000ffff00ffff0000ff0000ffffffff0000ffffffffffffffffffffffffffffffff
+ffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff
+ffffffffffffff0000ffffffffff0000ffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffff0000ffffffffff0000ffffffffff0000ffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff0000ffffffffffffffffffffffffffffff0000ffffffffff0000ffff
+ffffff0000ffffffffff00ffffffff0000ffffffffffffffffffffffffffffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff000000ffffffffffff0000000000ffff00000000
+00000000000000000000ffff0000ffffff000000ffffff00000000000000ffffffffff00
+0000000000ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffff0000ff
+ff000000ffffff0000ffffffffff0000ffffffffff0000ffffffff0000ffffffff0000ff
+ffff0000ffffff0000ffffff0000ffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffff0000ffffffff000000ffff0000ffffffffff0000ffffffffff0000ffffffff
+0000ffffffff0000ffffff0000ffffff0000000000000000ffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffff0000ffffffffff0000ffff0000ffffffffff0000ffff
+ffffff0000ffffffff0000ffffffff0000ffffff0000ffff0000ffffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffff0000ffffffffff0000ffffffffff0000ffff0000
+ffffffffff0000ffffffffff0000ffffffff0000ffffffff0000ffffff0000ffff0000ff
+ffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffff0000ffff
+ffffff0000ffff0000ffffffffff0000ffffffffff0000ffffffff0000ffffffff0000ff
+ffff0000ffff000000ffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffff0000ffffffffff00ffffff0000ffffffffff0000ffffffffff0000ffffffff
+0000ffffffff0000ffffff0000ffffff0000ffffffffff00ffffffff0000ffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffff00ffffffffffff0000ffffff0000ffffff0000ffffffffff0000ffff
+ffffff0000ffffffff0000ffffffff0000ffffff0000ffffff00000000000000ffffffff
+ff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff0000ffffffffffff00000000ffffffff000000
+0000ffff0000000000ffff0000000000ff00000000ffff00000000ff00000000ffffff00
+00000000ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffff
+end
+%%PageTrailer
+%%Trailer
+%%EOF
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/filter.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/filter.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,74 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 2550 5100 6300 6375
+2 2 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 5
+ 2550 5475 4050 5475 4050 5775 2550 5775 2550 5475
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 2
+ 4050 5475 6300 5475
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3300 6375 3300 5925
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 1 2
+ 0 0 1.00 60.00 120.00
+ 0 0 1.00 60.00 120.00
+ 3450 6225 4050 6225
+4 0 0 100 0 0 18 0.0000 4 165 90 4050 5325 t\001
+4 0 0 100 0 0 18 0.0000 4 165 345 3600 6075 tau\001
+-6
+2 2 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 5
+ 2550 2025 4050 2025 4050 2325 2550 2325 2550 2025
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 2
+ 4050 2025 6300 2025
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4050 2925 4050 2475
+2 2 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 5
+ 2550 7200 6375 7200 6375 7500 2550 7500 2550 7200
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3975 8025 3975 7575
+2 2 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 5
+ 2475 3525 3975 3525 3975 3825 2475 3825 2475 3525
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 2
+ 3975 3525 6225 3525
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 1 2
+ 0 0 1.00 60.00 120.00
+ 0 0 1.00 60.00 120.00
+ 3975 4275 4575 4275
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4650 4425 4650 3975
+2 2 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 5
+ 2550 600 4050 600 4050 900 2550 900 2550 600
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 0 0 2
+ 4050 600 6300 600
+2 1 0 1 0 0 100 0 10 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4050 1500 4050 1050
+4 0 0 100 0 0 18 0.0000 4 165 90 4050 1875 t\001
+4 0 0 50 0 0 24 0.0000 4 330 3540 6675 2175 argmax P(x(1:t) | y(1:t))\001
+4 0 0 50 0 0 24 0.0000 4 330 810 6975 2550 x(1:t)\001
+4 0 0 100 0 0 18 0.0000 4 165 90 3975 7125 t\001
+4 0 0 100 0 0 18 0.0000 4 195 180 6225 7125 T\001
+4 0 0 100 0 0 18 0.0000 4 165 90 3975 3375 t\001
+4 0 0 100 0 0 18 0.0000 4 195 540 3975 4125 delta\001
+4 0 0 100 0 0 18 0.0000 4 165 90 4050 450 t\001
+4 0 0 50 0 0 24 0.0000 4 330 1950 6675 750 P(X(t)|y(1:t))\001
+4 0 0 50 0 0 24 0.0000 4 330 2865 6450 3600 P(X(t+delta)|y(1:t))\001
+4 0 0 50 0 0 24 0.0000 4 330 2550 6600 5550 P(X(t-tau)|y(1:t))\001
+4 0 0 50 0 0 24 0.0000 4 330 2085 6750 7425 P(X(t)|y(1:T))\001
+4 0 0 50 0 0 24 0.0000 4 330 1140 600 900 filtering\001
+4 0 0 50 0 0 24 0.0000 4 330 1470 450 3750 prediction\001
+4 0 0 50 0 0 24 0.0000 4 255 1005 600 2175 Viterbi\001
+4 0 0 50 0 0 24 0.0000 4 330 1305 600 5775 fixed-lag\001
+4 0 0 50 0 0 24 0.0000 4 330 1575 525 7740 smoothing\001
+4 0 0 50 0 0 24 0.0000 4 330 1170 525 8055 (offline)\001
+4 0 0 50 0 0 24 0.0000 4 330 1575 600 6090 smoothing\001
+4 0 0 50 0 0 24 0.0000 4 255 1920 525 7425 fixed interval\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/filter.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/filter.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/filter.tex
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/filter.tex Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+%Latex
+\documentstyle[fleqn,psfig,12pt,pstricks,pst-node,pst-tree]{article}
+
+%\documentclass[fleqn,12pt]{article}
+%\usepackage{pstricks,pst-node,pst-tree}
+
+%latex2e
+%\documentclass{article}
+%\usepackage{epsfig,alltt,fancybox}
+
+\setlength{\textwidth}{6.5in}
+\setlength{\oddsidemargin}{0in}
+\setlength{\textheight}{8.5in}
+\setlength{\headheight}{0in}
+\setlength{\headsep}{-0.5in}
+\setlength{\parindent}{0in} % block style
+\setlength{\parskip}{0.3cm}
+
+\newcommand{\mytitle}[1]{\newpage \huge \begin{center}#1\vspace{0.8cm}\end{center} \LARGE}
+
+
+\begin{document}
+
+\mytitle{Hello world}
+
+Hello world
+
+\centering
+$
+\pstree[treemode=R]{\Tcircle{b}}{%
+ \pstree{\TC*^{a_1}}{%
+ \Tr{b_{11}}^{x_1}
+ \Tr{b_{12}}_{x_2}
+ }
+ \pstree{\TC*_{a_2}}{%
+ \Tr{b_{21}}^{x_1}
+ \Tr{b_{22}}_{x_2}
+ }
+}
+$
+
+
+
+\mytitle{Hello world 2}
+
+\psline[linewidth=0.5cm](0,0)(2,0)
+\psline[linewidth=0.05cm](2,-1)(6,-1)
+
+
+
+\end{document}
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/gaussplot.png
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/gaussplot.png has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hme.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hme.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 225 150 2625 3825
+6 300 150 1800 3450
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 2212.500 2250.000 825 1425 600 2325 825 3075
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 3083.468 1905.242 750 525 375 2025 675 3150
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 -1 0 0 -1 0.000 0 0 1 0 679.747 1409.810 1200 450 1725 1725 1275 2325
+ 0 0 1.00 60.00 120.00
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 975 375 300 225 975 375 1275 600
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1050 3225 300 225 1050 3225 1350 3450
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 825 1050 1200 1050 1200 1500 825 1500 825 1050
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 825 2100 1200 2100 1200 2550 825 2550 825 2100
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 600 1050 1050
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 1575 1050 2100
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 2625 1050 3000
+-6
+4 0 -1 0 0 0 12 0.0000 4 180 2370 225 3750 Hierarchical Mixture of Experts\001
+4 0 -1 0 0 0 12 0.0000 4 135 120 825 450 X\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 900 1350 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 900 2400 Q2\001
+4 0 -1 0 0 0 12 0.0000 4 135 135 900 3300 Y\001
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hme.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hme.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hme_dec_boundary.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hme_dec_boundary.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hme_dec_boundary.png
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hme_dec_boundary.png has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm3.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm3.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 525 1650 270 270 525 1650 675 1875
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1575 1650 270 270 1575 1650 1725 1875
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 2625 1650 270 270 2625 1650 2775 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 375 600 750 600 750 1050 375 1050 375 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 1050 525 1425
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 825 1350 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1425 600 1800 600 1800 1050 1425 1050 1425 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 1050 1575 1425
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1800 825 2325 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2400 600 2775 600 2775 1050 2400 1050 2400 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2550 1050 2550 1425
+4 0 0 100 0 2 12 0.0000 4 135 90 525 975 1\001
+4 0 0 100 0 2 12 0.0000 4 135 90 1575 975 3\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2475 900 5\001
+4 0 0 100 0 2 12 0.0000 4 135 90 450 1725 2\001
+4 0 0 100 0 2 12 0.0000 4 135 90 1425 1725 4\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2550 1725 6\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm3.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm3.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm3letter.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm3letter.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 525 1650 270 270 525 1650 675 1875
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1575 1650 270 270 1575 1650 1725 1875
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 2625 1650 270 270 2625 1650 2775 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 375 600 750 600 750 1050 375 1050 375 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 1050 525 1425
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 825 1350 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1425 600 1800 600 1800 1050 1425 1050 1425 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 1050 1575 1425
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1800 825 2325 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2400 600 2775 600 2775 1050 2400 1050 2400 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2550 1050 2550 1425
+4 0 0 100 0 0 12 0.0000 4 135 225 450 1725 Y1\001
+4 0 0 100 0 0 12 0.0000 4 135 225 1425 1725 Y2\001
+4 0 0 100 0 0 12 0.0000 4 135 225 2475 1725 Y3\001
+4 0 0 100 0 0 12 0.0000 4 135 225 450 900 X1\001
+4 0 0 100 0 0 12 0.0000 4 135 225 2475 900 X3\001
+4 0 0 100 0 0 12 0.0000 4 135 225 1500 900 X2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm3letter.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm3letter.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm3letter.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm3letter.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm4.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm4.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+1 3 0 2 0 7 50 0 -1 0.000 1 0.0000 600 1200 300 300 600 1200 900 1200
+1 3 0 2 0 7 50 0 -1 0.000 1 0.0000 1800 1200 300 300 1800 1200 2100 1200
+1 3 0 2 0 7 50 0 -1 0.000 1 0.0000 3000 1200 300 300 3000 1200 3300 1200
+1 3 0 2 0 7 50 0 -1 0.000 1 0.0000 4200 1200 300 300 4200 1200 4500 1200
+1 3 0 2 0 0 50 0 7 0.000 1 0.0000 600 2400 300 300 600 2400 900 2400
+1 3 0 2 0 0 50 0 7 0.000 1 0.0000 3000 2400 300 300 3000 2400 3300 2400
+1 3 0 2 0 0 50 0 7 0.000 1 0.0000 4200 2400 300 300 4200 2400 4500 2400
+1 3 0 2 0 0 50 0 7 0.000 1 0.0000 1800 2400 300 300 1800 2400 2100 2400
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 600 1500 600 2100
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 900 1200 1500 1200
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 2100 1200 2700 1200
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 3375 1200 3900 1200
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 1800 1500 1800 2100
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 3000 1500 3000 2100
+2 1 0 2 0 0 50 0 7 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 4200 1500 4200 2100
+4 0 0 100 0 0 30 0.0000 4 60 600 5025 1200 . . .\001
+4 0 0 50 0 0 24 0.0000 4 255 270 450 1275 X\001
+4 0 0 50 0 0 24 0.0000 4 255 270 1650 1275 X\001
+4 0 0 50 0 0 24 0.0000 4 255 270 2850 1275 X\001
+4 0 0 50 0 0 24 0.0000 4 255 240 450 2475 Y\001
+4 0 0 50 0 0 24 0.0000 4 255 240 2850 2475 Y\001
+4 0 0 50 0 0 24 0.0000 4 255 240 4050 2475 Y\001
+4 0 0 50 0 0 24 0.0000 4 255 180 675 1425 1\001
+4 0 0 50 0 0 24 0.0000 4 255 180 1875 1350 2\001
+4 0 0 50 0 0 24 0.0000 4 255 180 3075 1350 3\001
+4 0 0 50 0 0 24 0.0000 4 255 180 4275 1425 4\001
+4 0 0 50 0 0 24 0.0000 4 255 180 600 2550 1\001
+4 0 0 50 0 0 24 0.0000 4 255 180 3000 2550 3\001
+4 0 0 50 0 0 24 0.0000 4 255 180 4200 2550 4\001
+4 0 0 50 0 0 24 0.0000 4 255 270 4050 1275 X\001
+4 0 0 50 0 0 24 0.0000 4 255 180 1800 2625 2\001
+4 0 0 50 0 0 24 0.0000 4 255 240 1650 2475 Y\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm4.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm4.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_params.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_params.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,81 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 3150 2550 270 270 3150 2550 3300 2775
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 3975 2550 270 270 3975 2550 4125 2775
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1350 2550 270 270 1350 2550 1500 2775
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 2250 2550 270 270 2250 2550 2400 2775
+1 3 1 1 0 7 100 0 -1 4.000 1 0.0000 2625 3825 270 270 2625 3825 2775 4050
+1 3 1 1 0 7 100 0 -1 4.000 1 0.0000 1425 600 270 270 1425 600 1575 825
+1 3 1 1 0 7 100 0 -1 4.000 1 0.0000 3000 600 270 270 3000 600 3150 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2925 1500 3300 1500 3300 1950 2925 1950 2925 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 1950 3075 2325
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3750 1500 4125 1500 4125 1950 3750 1950 3750 1500
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3300 1725 3750 1725
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3975 1950 3975 2250
+2 1 0 1 0 0 100 0 20 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 1725 2100 1725
+2 1 0 1 0 0 100 0 20 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2475 1725 2925 1725
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1200 1500 1575 1500 1575 1950 1200 1950 1200 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1350 1950 1350 2325
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2100 1500 2475 1500 2475 1950 2100 1950 2100 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2250 1950 2250 2325
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2475 3600 1500 2775
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2550 3600 2400 2850
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2700 3525 3075 2775
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2850 3675 3825 2775
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1425 900 1425 1500
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2925 900 2400 1500
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 900 3075 1425
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3225 750 3900 1500
+4 0 0 100 0 0 12 0.0000 4 135 225 3000 2625 Y3\001
+4 0 0 100 0 0 12 0.0000 4 165 225 3825 1800 Q4\001
+4 0 0 100 0 0 12 0.0000 4 165 225 3000 1800 Q3\001
+4 0 0 100 0 0 12 0.0000 4 135 225 3825 2625 Y4\001
+4 0 0 100 0 0 12 0.0000 4 135 225 1275 2625 Y1\001
+4 0 0 100 0 0 12 0.0000 4 165 225 1275 1800 Q1\001
+4 0 0 100 0 0 12 0.0000 4 135 225 2100 2625 Y2\001
+4 0 0 100 0 0 12 0.0000 4 165 225 2175 1800 Q2\001
+4 0 0 100 0 0 12 0.0000 4 135 195 1275 675 P1\001
+4 0 0 100 0 0 12 0.0000 4 135 195 2850 675 P2\001
+4 0 0 100 0 0 12 0.0000 4 135 195 2550 3900 P3\001
+4 0 0 100 0 0 30 0.0000 4 30 525 4350 1800 . . .\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_params.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_params.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_paramsX.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_paramsX.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,81 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 3150 2550 270 270 3150 2550 3300 2775
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 3975 2550 270 270 3975 2550 4125 2775
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1350 2550 270 270 1350 2550 1500 2775
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 2250 2550 270 270 2250 2550 2400 2775
+1 3 1 1 0 7 100 0 -1 4.000 1 0.0000 1425 600 270 270 1425 600 1575 825
+1 3 1 1 0 7 100 0 -1 4.000 1 0.0000 3000 600 270 270 3000 600 3150 825
+1 3 1 1 0 7 100 0 -1 4.000 1 0.0000 2615 3863 270 270 2615 3863 2765 4088
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2925 1500 3300 1500 3300 1950 2925 1950 2925 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 1950 3075 2325
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3750 1500 4125 1500 4125 1950 3750 1950 3750 1500
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3300 1725 3750 1725
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3975 1950 3975 2250
+2 1 0 1 0 0 100 0 20 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 1725 2100 1725
+2 1 0 1 0 0 100 0 20 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2475 1725 2925 1725
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1200 1500 1575 1500 1575 1950 1200 1950 1200 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1350 1950 1350 2325
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2100 1500 2475 1500 2475 1950 2100 1950 2100 1500
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2250 1950 2250 2325
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2475 3600 1500 2775
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2550 3600 2400 2850
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2700 3525 3075 2775
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2850 3675 3825 2775
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1425 900 1425 1500
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2925 900 2400 1500
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 900 3075 1425
+2 1 1 1 0 7 100 0 -1 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3225 750 3900 1500
+4 0 0 100 0 0 12 0.0000 4 135 225 3000 2625 Y3\001
+4 0 0 100 0 0 12 0.0000 4 135 225 3825 2625 Y4\001
+4 0 0 100 0 0 12 0.0000 4 135 225 1275 2625 Y1\001
+4 0 0 100 0 0 12 0.0000 4 135 225 2100 2625 Y2\001
+4 0 0 100 0 0 30 0.0000 4 60 600 4350 1800 . . .\001
+4 0 0 50 0 0 12 0.0000 4 135 225 1275 1800 X1\001
+4 0 0 50 0 0 12 0.0000 4 135 225 2175 1800 X2\001
+4 0 0 50 0 0 12 0.0000 4 135 225 3000 1800 X3\001
+4 0 0 50 0 0 12 0.0000 4 135 225 3825 1800 X4\001
+4 0 0 50 0 0 18 0.0000 4 195 180 2550 3975 B\001
+4 0 0 50 0 0 18 0.0000 4 255 210 1275 750 pi\001
+4 0 0 50 0 0 18 0.0000 4 195 225 2850 750 A\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_square.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm4_square.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+6 1425 600 2025 1950
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 1725 1650 270 270 1725 1650 1875 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1575 600 1950 600 1950 1050 1575 1050 1575 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1725 1050 1725 1425
+4 0 0 100 0 0 12 0.0000 4 135 225 1575 1725 Y2\001
+4 0 0 100 0 0 12 0.0000 4 165 225 1650 900 Q2\001
+-6
+6 525 600 1125 1950
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 825 1650 270 270 825 1650 975 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 600 1050 600 1050 1050 675 1050 675 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1050 825 1425
+4 0 0 100 0 0 12 0.0000 4 135 225 750 1725 Y1\001
+4 0 0 100 0 0 12 0.0000 4 165 225 750 900 Q1\001
+-6
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 2625 1650 270 270 2625 1650 2775 1875
+1 3 0 1 0 0 100 0 2 0.000 1 0.0000 3450 1650 270 270 3450 1650 3600 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2400 600 2775 600 2775 1050 2400 1050 2400 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2550 1050 2550 1425
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3225 600 3600 600 3600 1050 3225 1050 3225 600
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2775 825 3225 825
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 1050 3450 1350
+2 1 0 1 0 0 100 0 20 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 825 1575 825
+2 1 0 1 0 0 100 0 20 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 825 2400 825
+4 0 0 100 0 0 12 0.0000 4 135 225 2475 1725 Y3\001
+4 0 0 100 0 0 12 0.0000 4 165 225 3300 900 Q4\001
+4 0 0 100 0 0 12 0.0000 4 165 225 2475 900 Q3\001
+4 0 0 100 0 0 12 0.0000 4 135 225 3300 1725 Y4\001
+4 0 0 100 0 0 30 0.0000 4 30 525 3825 975 . . .\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_ar.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_ar.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 225 225 2025 1875
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 525 1275 300 225 525 1275 825 1500
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1575 1275 300 225 1575 1275 1875 1500
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 450 1350 450
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 375 225 750 225 750 675 375 675 375 225
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 675 525 1050
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1425 225 1800 225 1800 675 1425 675 1425 225
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 675 1575 1050
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1275 1275 1275
+4 0 -1 0 0 0 12 0.0000 4 165 225 450 525 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1500 525 Q2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 375 1350 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1425 1350 Y2\001
+4 0 -1 0 0 0 12 0.0000 4 180 1785 225 1800 Auto Regressive HMM\001
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_ar.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_ar.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_coupled.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_coupled.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+6 300 225 1950 675
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 600 450 300 225 600 450 900 675
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1650 450 300 225 1650 450 1950 675
+-6
+6 450 1875 1875 2325
+6 450 1875 1875 2325
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 2100 1425 2100
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 450 1875 825 1875 825 2325 450 2325 450 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1500 1875 1875 1875 1875 2325 1500 2325 1500 1875
+-6
+-6
+6 450 1050 1875 1500
+6 450 1050 1875 1500
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1275 1425 1275
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 450 1050 825 1050 825 1500 450 1500 450 1050
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1500 1050 1875 1050 1875 1500 1500 1500 1500 1050
+-6
+-6
+6 300 2700 1950 3150
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 600 2925 300 225 600 2925 900 3150
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1650 2925 300 225 1650 2925 1950 3150
+-6
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 600 2325 600 2700
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1650 2325 1650 2700
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 600 1050 600 675
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1650 1050 1650 675
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 2100 1425 1350
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 900 1275 1350 2025
+4 0 -1 0 0 0 12 0.0000 4 135 210 450 525 C1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 525 1350 A1\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 525 2175 B1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 450 3000 D1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1500 3075 D2\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1500 525 C2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1575 1350 A2\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1575 2175 B2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_coupled.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_coupled.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_factorial.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_factorial.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 1501.355 1323.343 675 450 300 1275 525 2025
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 2263.600 1307.555 1650 525 1275 1200 1575 2025
+ 0 0 1.00 60.00 120.00
+6 675 1050 2100 1500
+6 675 1050 2100 1500
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 1275 1650 1275
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 1050 1050 1050 1050 1500 675 1500 675 1050
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1725 1050 2100 1050 2100 1500 1725 1500 1725 1050
+-6
+-6
+6 675 225 2100 675
+6 675 225 2100 675
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 450 1650 450
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 225 1050 225 1050 675 675 675 675 225
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1725 225 2100 225 2100 675 1725 675 1725 225
+-6
+-6
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 825 2100 300 225 825 2100 1125 2325
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1875 2100 300 225 1875 2100 2175 2325
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1500 825 1875
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1875 1500 1875 1875
+4 0 -1 0 0 0 12 0.0000 4 135 225 675 2175 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1800 2250 Y2\001
+4 0 -1 0 0 0 12 0.0000 4 135 1185 675 2850 Factorial HMM\001
+4 0 0 100 0 0 12 0.0000 4 135 225 750 525 A1\001
+4 0 0 100 0 0 12 0.0000 4 135 225 1800 525 A2\001
+4 0 0 100 0 0 12 0.0000 4 135 210 750 1350 B1\001
+4 0 0 100 0 0 12 0.0000 4 135 210 1800 1350 B2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_factorial.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_factorial.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_gauss.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_gauss.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 225 600 1875 1875
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 525 1650 300 225 525 1650 825 1875
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1575 1650 300 225 1575 1650 1875 1875
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 825 1350 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 375 600 750 600 750 1050 375 1050 375 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 1050 525 1425
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1425 600 1800 600 1800 1050 1425 1050 1425 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 1050 1575 1425
+-6
+4 0 -1 0 0 0 12 0.0000 4 165 225 450 900 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 375 1725 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1500 1725 Y2\001
+4 0 -1 0 0 0 12 0.0000 4 180 2130 150 2250 HMM with Gaussian output\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1500 900 Q2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_gauss.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_gauss.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_io.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_io.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 1725.000 1200.000 600 450 375 1275 600 1950
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 2859.375 1303.125 1650 525 1425 1200 1575 1950
+ 0 0 1.00 60.00 120.00
+6 525 150 2175 2325
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1875 2100 300 225 1875 2100 2175 2325
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 825 2100 300 225 825 2100 1125 2325
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 900 375 300 225 900 375 1200 600
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1875 375 300 225 1875 375 2175 600
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 1275 1650 1275
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 1050 1050 1050 1050 1500 675 1500 675 1050
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1500 825 1875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1725 1050 2100 1050 2100 1500 1725 1500 1725 1050
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1875 1500 1875 1875
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 675 825 1050
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1875 675 1875 1050
+-6
+4 0 -1 0 0 0 12 0.0000 4 165 225 750 1350 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1800 1350 Q2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 750 450 U1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 750 2175 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1725 450 U2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1725 2175 Y2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_io.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_io.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_mixgauss.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_mixgauss.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+6 675 1650 1275 2100
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 975 1875 300 225 975 1875 1275 2100
+4 0 -1 0 0 0 12 0.0000 4 135 225 825 1950 Y1\001
+-6
+6 1725 1725 2325 2175
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 2025 1950 300 225 2025 1950 2325 2175
+4 0 -1 0 0 0 12 0.0000 4 135 225 1950 2025 Y2\001
+-6
+6 1500 900 1875 1350
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1500 900 1875 900 1875 1350 1500 1350 1500 900
+4 0 -1 0 0 0 12 0.0000 4 135 255 1575 1200 M2\001
+-6
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1275 450 1800 450
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 825 225 1200 225 1200 675 825 675 825 225
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1875 225 2250 225 2250 675 1875 675 1875 225
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 225 900 600 900 600 1350 225 1350 225 900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 675 450 900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 1350 750 1650
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 675 1050 1650
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1875 675 1725 900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1800 1350 1950 1725
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2100 675 2100 1725
+4 0 -1 0 0 0 12 0.0000 4 165 225 900 525 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 135 255 300 1200 M1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1950 525 Q2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_mixgauss.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_mixgauss.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,162 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 2784.375 4153.125 1575 3375 1350 4050 1500 4800
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 1650.000 4050.000 525 3300 300 4125 525 4800
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 6376.355 4323.343 5550 3450 5175 4275 5400 5025
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 7138.600 4307.555 6525 3525 6150 4200 6450 5025
+ 1 1 1.00 60.00 120.00
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 750 4950 300 225 750 4950 1050 5175
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 1800 4950 300 225 1800 4950 2100 5175
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 825 3225 300 225 825 3225 1125 3450
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 1800 3225 300 225 1800 3225 2100 3450
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 3150 3150 300 225 3150 3150 3450 3375
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 4200 3150 300 225 4200 3150 4500 3375
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 3150 5625 300 225 3150 5625 3450 5850
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 4200 5625 300 225 4200 5625 4500 5850
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 5700 5100 300 225 5700 5100 6000 5325
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 6750 5100 300 225 6750 5100 7050 5325
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 3375 1800 300 225 3375 1800 3675 2025
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 1575 1650 300 225 1575 1650 1875 1875
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 525 1650 300 225 525 1650 825 1875
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 4425 1875 300 225 4425 1875 4725 2100
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 5550 1425 300 225 5550 1425 5850 1650
+1 1 0 2 7 0 50 0 20 0.000 1 0.0000 6600 1425 300 225 6600 1425 6900 1650
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3150 3750 3150 3375
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6750 4500 6750 4875
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 4275 150 4650 150 4650 600 4275 600 4275 150
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3225 600 2850 825
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 600 3900 975 3900 975 4350 600 4350 600 3900
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 750 4350 750 4725
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 750 3525 750 3900
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1800 4350 1800 4725
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1050 4125 1575 4125
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1800 3525 1800 3900
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 1650 3900 2025 3900 2025 4350 1650 4350 1650 3900
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 3000 3750 3375 3750 3375 4200 3000 4200 3000 3750
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3450 3975 3900 4725
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3450 3975 3975 3975
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 3000 4575 3375 4575 3375 5025 3000 5025 3000 4575
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3150 5025 3150 5400
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3450 4800 3975 4050
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3450 4800 3975 4800
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 4050 4575 4425 4575 4425 5025 4050 5025 4050 4575
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 4050 3750 4425 3750 4425 4200 4050 4200 4050 3750
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4200 3750 4200 3375
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4200 5025 4200 5400
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 5550 4050 5925 4050 5925 4500 5550 4500 5550 4050
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5700 4500 5700 4875
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 3450 6525 3450
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 4275 6525 4275
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 6600 3225 6975 3225 6975 3675 6600 3675 6600 3225
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 6600 4050 6975 4050 6975 4500 6600 4500 6600 4050
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 825 825 1350 825
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1575 1050 1575 1425
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 375 600 750 600 750 1050 375 1050 375 600
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 525 1050 525 1425
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 1425 600 1800 600 1800 1050 1425 1050 1425 600
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 2625 825 3000 825 3000 1275 2625 1275 2625 825
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2925 1275 3150 1575
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3450 600 3450 1575
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 3225 150 3600 150 3600 600 3225 600 3225 150
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3675 375 4200 375
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 3900 825 4275 825 4275 1275 3900 1275 3900 825
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4275 600 4125 825
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4200 1275 4350 1650
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4500 600 4500 1650
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5550 825 5550 1200
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 5400 375 5775 375 5775 825 5400 825 5400 375
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5850 600 6375 600
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 6450 375 6825 375 6825 825 6450 825 6450 375
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6600 825 6600 1200
+2 1 0 2 0 0 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5850 1425 6300 1425
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 5550 3225 5925 3225 5925 3675 5550 3675 5550 3225
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo_names.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo_names.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,169 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 1650.000 4050.000 525 3300 300 4125 525 4800
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 2784.375 4153.125 1575 3375 1350 4050 1500 4800
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 6376.355 4323.343 5550 3450 5175 4275 5400 5025
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 7138.600 4307.555 6525 3525 6150 4200 6450 5025
+ 0 0 1.00 60.00 120.00
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 5550 1425 300 225 5550 1425 5850 1650
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 6600 1425 300 225 6600 1425 6900 1650
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 525 1650 300 225 525 1650 825 1875
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1800 4950 300 225 1800 4950 2100 5175
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 825 3225 300 225 825 3225 1125 3450
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1800 3225 300 225 1800 3225 2100 3450
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3150 3150 300 225 3150 3150 3450 3375
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 4200 3150 300 225 4200 3150 4500 3375
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3150 5625 300 225 3150 5625 3450 5850
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 4200 5625 300 225 4200 5625 4500 5850
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 5700 5100 300 225 5700 5100 6000 5325
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3375 1800 300 225 3375 1800 3675 2025
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 750 4950 300 225 750 4950 1050 5175
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 6750 5100 300 225 6750 5100 7050 5325
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 4425 1875 300 225 4425 1875 4725 2100
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1575 1650 300 225 1575 1650 1875 1875
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5850 600 6375 600
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 5400 375 5775 375 5775 825 5400 825 5400 375
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5550 825 5550 1200
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 6450 375 6825 375 6825 825 6450 825 6450 375
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6600 825 6600 1200
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5850 1425 6300 1425
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 825 1350 825
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 375 600 750 600 750 1050 375 1050 375 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 1050 525 1425
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1425 600 1800 600 1800 1050 1425 1050 1425 600
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 1050 1575 1425
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 4125 1575 4125
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 600 3900 975 3900 975 4350 600 4350 600 3900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 750 4350 750 4725
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1650 3900 2025 3900 2025 4350 1650 4350 1650 3900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1800 4350 1800 4725
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 750 3525 750 3900
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1800 3525 1800 3900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3150 5025 3150 5400
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4200 5025 4200 5400
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3150 3750 3150 3375
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4200 3750 4200 3375
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 4800 3975 4050
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 3975 3900 4725
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 4800 3975 4800
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3000 4575 3375 4575 3375 5025 3000 5025 3000 4575
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 4050 4575 4425 4575 4425 5025 4050 5025 4050 4575
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 3975 3975 3975
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3000 3750 3375 3750 3375 4200 3000 4200 3000 3750
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 4050 3750 4425 3750 4425 4200 4050 4200 4050 3750
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5700 4500 5700 4875
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6750 4500 6750 4875
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6000 3450 6525 3450
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 5550 3225 5925 3225 5925 3675 5550 3675 5550 3225
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 6600 3225 6975 3225 6975 3675 6600 3675 6600 3225
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6000 4275 6525 4275
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 5550 4050 5925 4050 5925 4500 5550 4500 5550 4050
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 6600 4050 6975 4050 6975 4500 6600 4500 6600 4050
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3675 375 4200 375
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3225 150 3600 150 3600 600 3225 600 3225 150
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 4275 150 4650 150 4650 600 4275 600 4275 150
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2625 825 3000 825 3000 1275 2625 1275 2625 825
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3225 600 2850 825
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2925 1275 3150 1575
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3450 600 3450 1575
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4275 600 4125 825
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4200 1275 4350 1650
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4500 600 4500 1650
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 3900 825 4275 825 4275 1275 3900 1275 3900 825
+4 0 -1 0 0 0 12 0.0000 4 180 2130 150 2400 HMM with Gaussian output\001
+4 0 -1 0 0 0 12 0.0000 4 180 1785 5100 2400 Auto Regressive HMM\001
+4 0 0 50 0 0 12 0.0000 4 180 1440 300 6075 Input-output HMM\001
+4 0 0 50 0 0 12 0.0000 4 180 1125 3000 6225 Coupled HMM\001
+4 0 -1 0 0 0 12 0.0000 4 135 1185 5475 6225 Factorial HMM\001
+4 0 0 50 0 0 12 0.0000 4 180 1530 2925 2625 of Gaussians output\001
+4 0 0 50 0 0 12 0.0000 4 135 1455 2925 2400 HMM with mixture\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo_small.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/hmm_zoo_small.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+75.00
+Single
+-2
+1200 2
+5 1 0 2 0 7 50 0 -1 0.000 0 1 1 0 10209.375 1303.125 9000 525 8775 1200 8925 1950
+ 0 0 2.00 120.00 240.00
+5 1 0 2 0 7 50 0 -1 0.000 0 1 1 0 9075.000 1200.000 7950 450 7725 1275 7950 1950
+ 0 0 2.00 120.00 240.00
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 525 1650 300 225 525 1650 825 1875
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 1590 1654 300 225 1590 1654 1890 1879
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 3375 1800 300 225 3375 1800 3675 2025
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 4427 1882 300 225 4427 1882 4727 2107
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 5925 1800 300 225 5925 1800 6225 2025
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 6975 1800 300 225 6975 1800 7275 2025
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 8173 2107 300 225 8173 2107 8473 2332
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 9225 2100 300 225 9225 2100 9525 2325
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 8250 375 300 225 8250 375 8550 600
+1 1 0 2 0 0 50 0 20 0.000 1 0.0000 9225 375 300 225 9225 375 9525 600
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 1575 1050 1575 1425
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 825 825 1350 825
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 525 1050 525 1425
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 375 600 750 600 750 1050 375 1050 375 600
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 1425 600 1800 600 1800 1050 1425 1050 1425 600
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 3450 600 3450 1575
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 3225 600 2850 825
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 2625 825 3000 825 3000 1275 2625 1275 2625 825
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 3225 150 3600 150 3600 600 3225 600 3225 150
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 3675 375 4200 375
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 3900 825 4275 825 4275 1275 3900 1275 3900 825
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 4200 1275 4350 1650
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 4500 600 4500 1650
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 4275 150 4650 150 4650 600 4275 600 4275 150
+2 1 0 2 0 7 50 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 2.00 120.00 240.00
+ 4275 600 3975 825
+2 1 0 2 0 7 50 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 2.00 120.00 240.00
+ 2925 1275 3225 1575
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 8175 675 8175 1050
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 9225 675 9225 1050
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 9225 1500 9225 1875
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 8475 1275 9000 1275
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 8175 1500 8175 1875
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 8025 1050 8400 1050 8400 1500 8025 1500 8025 1050
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 9075 1050 9450 1050 9450 1500 9075 1500 9075 1050
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 5925 1200 5925 1575
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 6225 975 6750 975
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 6225 1800 6675 1800
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 6975 1200 6975 1575
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 6825 750 7200 750 7200 1200 6825 1200 6825 750
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 5775 750 6150 750 6150 1200 5775 1200 5775 750
+4 0 0 50 0 0 24 0.0000 4 255 945 375 2400 HMM\001
+4 0 0 50 0 0 24 0.0000 4 255 2580 2625 2475 MixGauss HMM\001
+4 0 0 50 0 0 24 0.0000 4 255 1470 8025 2700 IO-HMM\001
+4 0 0 50 0 0 24 0.0000 4 255 1575 5625 2475 AR-HMM\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/ifa.eps
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/ifa.eps Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,470 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: (ImageMagick)
+%%Title: (ifa.eps)
+%%CreationDate: (Tue Nov 16 19:52:10 2004)
+%%BoundingBox: 0 0 246 221
+%%DocumentData: Clean7Bit
+%%LanguageLevel: 1
+%%Pages: 1
+%%EndComments
+
+%%BeginDefaults
+%%EndDefaults
+
+%%BeginProlog
+%
+% Display a color image. The image is displayed in color on
+% Postscript viewers or printers that support color, otherwise
+% it is displayed as grayscale.
+%
+/DirectClassPacket
+{
+ %
+ % Get a DirectClass packet.
+ %
+ % Parameters:
+ % red.
+ % green.
+ % blue.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/DirectClassImage
+{
+ %
+ % Display a DirectClass image.
+ %
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { DirectClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayDirectClassPacket } image
+ } ifelse
+} bind def
+
+/GrayDirectClassPacket
+{
+ %
+ % Get a DirectClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % red
+ % green
+ % blue
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/GrayPseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassImage
+{
+ %
+ % Display a PseudoClass image.
+ %
+ % Parameters:
+ % class: 0-PseudoClass or 1-Grayscale.
+ %
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ class 0 gt
+ {
+ currentfile buffer readline pop
+ token pop /depth exch def pop
+ /grays columns 8 add depth sub depth mul 8 idiv string def
+ columns rows depth
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { currentfile grays readhexstring pop } image
+ }
+ {
+ %
+ % Parameters:
+ % colors: number of colors in the colormap.
+ % colormap: red, green, blue color packets.
+ %
+ currentfile buffer readline pop
+ token pop /colors exch def pop
+ /colors colors 3 mul def
+ /colormap colors string def
+ currentfile colormap readhexstring pop pop
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { PseudoClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayPseudoClassPacket } image
+ } ifelse
+ } ifelse
+} bind def
+
+/DisplayImage
+{
+ %
+ % Display a DirectClass or PseudoClass image.
+ %
+ % Parameters:
+ % x & y translation.
+ % x & y scale.
+ % label pointsize.
+ % image label.
+ % image columns & rows.
+ % class: 0-DirectClass or 1-PseudoClass.
+ % compression: 0-none or 1-RunlengthEncoded.
+ % hex color packets.
+ %
+ gsave
+ /buffer 512 string def
+ /byte 1 string def
+ /color_packet 3 string def
+ /pixels 768 string def
+
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ x y translate
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ currentfile buffer readline pop
+ token pop /pointsize exch def pop
+ /Times-Roman findfont pointsize scalefont setfont
+ x y scale
+ currentfile buffer readline pop
+ token pop /columns exch def
+ token pop /rows exch def pop
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ currentfile buffer readline pop
+ token pop /compression exch def pop
+ class 0 gt { PseudoClassImage } { DirectClassImage } ifelse
+ grestore
+} bind def
+%%EndProlog
+%%Page: 1 1
+%%PageBoundingBox: 0 0 246 221
+userdict begin
+DisplayImage
+0 0
+246 221
+12.000000
+246 221
+1
+1
+1
+1
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffc00000007f
+ffffffffffffffffff00000001fffffffffffffffffcffffffffdfffffff7fffffffffff
+ffffffff7ffffffdfffffffffffffffffcffffffffdfffffff7fffffffffffffffffff7f
+fffffdfffffffffffffffffcffffffffdfffffff7fffffffffffffffffff7ffffffdffff
+fffffffffffffcffffffffdfffffff7fffffffffffffffffff7ffffffdffffffffffffff
+fffcffffffffdfffffff7fffffffffffffffffff7ffffffdfffffffffffffffffcffffff
+ffdfffffff7fffffffffffffffffff7ffffffdfffffffffffffffffcffffffffdfffffff
+7fffffffffffffffffff7ffffffdfffffffffffffffffcffffffffdfffffff7fffffffff
+ffffffffff7ffffffdfffffffffffffffffcffffffffdfffffff7fffffffffffffffffff
+7ffffffdfffffffffffffffffcffffffffdfffffff7fffffffffffffffffff7ffffffdff
+fffffffffffffffcffffffffdf87dfff7fffffffffffffffffff7e1ffffdffffffffffff
+fffffcffffffffdf339fff7fffffffffffffffffff7ccffffdfffffffffffffffffcffff
+ffffdf7b5fff7fffffffffffffffffff7deffffdfffffffffffffffffcffffffffdefddf
+ff7fffffffffffffffffff7bf53ffdfffffffffffffffffcffffffffdefddfff7fffffff
+ffffffffffff7bf6dffdfffffffffffffffffcffffffffdefddfff7fffffffffffffffff
+ff7bf6dffdfffffffffffffffffcffffffffdf7bdfff7fffffffffffffffffff7deedffd
+fffffffffffffffffcffffffffdf33dfff7fffffffffffffffffff7ccedffdffffffffff
+fffffffcffffffffdf878fff7fffffffffffffffffff7e1c4ffdfffffffffffffffffcff
+ffffffdfe7ffff7fffffffffffffffffff7f9ffffdfffffffffffffffffcffffffffdff9
+ffff7fffffffffffffffffff7fe7fffdfffffffffffffffffcffffffffdfffffff7fffff
+ffffffffffffff7ffffffdfffffffffffffffffcffffffffdfffffff7fffffffffffffff
+ffff7ffffffdfffffffffffffffffcffffffffdfffffff7fffffffffffffffffff7fffff
+fdfffffffffffffffffcffffffffdfffffff7fffffffffffffffffff7ffffffdffffffff
+fffffffffcffffffffdfffffff7fffffffffffffffffff7ffffffdfffffffffffffffffc
+ffffffffdfffffff7fffffffffffffffffff7ffffffdfffffffffffffffffcffffffffdf
+ffffff7fffffffffffffffffff7ffffffdfffffffffffffffffcffffffffdfffffff7fff
+ffffffffffffffff7ffffffdfffffffffffffffffcffffffffc00000007fffffffffffff
+ffffff00000001fffffffffffffffffcffffffffffffbffffffffffffffffffffffffffe
+fffffffffffffffffffffcffffffffffffbffffffffffffffffffffffffffeffffffffff
+fffffffffffcffffffffffffbffffffffffffffffffffffffffeffffffffffffffffffff
+fcffffffffffffbffffffffffffffffffffffffffefffffffffffffffffffffcffffffff
+fffeaffffffffffffffffffffffffffabffffffffffffffffffffcfffffffffffeafffff
+fffffffffffffffffffffabffffffffffffffffffffcfffffffffffe9fffffffffffffff
+fffffffffffa7ffffffffffffffffffffcffffffffffff1fffffffffffffffffffffffff
+fc7ffffffffffffffffffffcffffffffffff1ffffffffffffffffffffffffffc7fffffff
+fffffffffffffcffffffffffff1ffffffffffffffffffffffffffc7fffffffffffffffff
+fffcffffffffffff3ffffffffffffffffffffffffffcfffffffffffffffffffffcffffff
+ffffffbffffffffffffffffffffffffffefffffffffffffffffffffcffffffffffffbfff
+fffffffffffffffffffffffefffffffffffffffffffffcffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffcffffffffffff800fffffffffffffffffffff
+f800fffffffffffffffffffffcfffffffffffc7ff1ffffffffffffffffffffc7ff1fffff
+fffffffffffffffcffffffffffe3fffe3ffffffffffffffffffe3fffe3ffffffffffffff
+fffffcffffffffffdfffffdffffffffffffffffffdfffffdfffffffffffffffffffcffff
+ffffff3fffffe7fffffffffffffffff3fffffe7ffffffffffffffffffcfffffffffeffff
+fffbffffffffffffffffefffffffbffffffffffffffffffcfffffffffdfffffffdffffff
+ffffffffffdfffffffdffffffffffffffffffcfffffffffbfffffffeffffffffffffffff
+bfffffffeffffffffffffffffffcfffffffff7ffffffff7fffffffffffffff7ffffffff7
+fffffffffffffffffcfffffffff7ffffffff7fffffffffffffff7ffffffff7ffffffffff
+fffffffcffffffffefffffffffbffffffffffffffefffffffffbfffffffffffffffffcff
+ffffffefff8c6fffbffffffffffffffeff18fffffbfffffffffffffffffcffffffffdfff
+decfffdffffffffffffffdffbdfffffdfffffffffffffffffcffffffffdfffedafffdfff
+fffffffffffdffdbfffffdfffffffffffffffffcffffffffdfffe3efffdfffffffffffff
+fdffc74ffffdfffffffffffffffffcffffffffdffff3efffdffffffffffffffdffe7b7ff
+fdfffffffffffffffffcffffffffdfffe9efffdffffffffffffffdffd3b7fffdffffffff
+fffffffffcffffffffdfffedefffdffffffffffffffdffdbb7fffdfffffffffffffffffc
+ffffffffdfffdeefffdffffffffffffffdffbdb7fffdfffffffffffffffffcffffffffef
+ff8c47ffbffffffffffffffeff1813fffbfffffffffffffffffcffffffffefffffffffbf
+fffffffffffffefffffffffbfffffffffffffffffcfffffffff7ffffffff7fffffffffff
+ffff7ffffffff7fffffffffffffffffcfffffffff7ffffffff7fffffffffffffff7fffff
+fff7fffffffffffffffffcfffffffffbfffffffeffffff3cf3ffffffbfffffffefffffff
+fffffffffffcfffffffffdfffffffdffffff3cf3ffffffdfffffffdfffffffffffffffff
+fcfffffffffefffffff9ffffffffffffffffefffffffbffffffffffffffffffcffffffff
+ff3fffffe67ffffffffffffffff3fffffe7ffffffffffffffffffcffffffffffdfffffdf
+8ffffffffffffffffdfffffdfffffffffffffffffffcffffffffffe3fffe3ff3ffffffff
+fffffffe3fffe3fffffffffffffffffffcfffffffffffc7ff1fffc7fffffffffffffffc7
+ff1ffffffffffffffffffffcfffffffffff7800f7fff9ffffffffffffffff000f7ffffff
+fffffffffffffcfffffffffff7ffff7fffe3ffffffffffffff87fffbffffffffffffffff
+fffcffffffffffefffffbffffcfffffffffffffe5ffffdfffffffffffffffffffcffffff
+ffffefffffbfffff1ffffffffffff13ffffefffffffffffffffffffcffffffffffdfffff
+dfffffe7ffffffffff8effffff7ffffffffffffffffffcffffffffffdfffffdffffff8ff
+fffffffc7dffffffbffffffffffffffffffcffffffffffdfffffefffffff3ffffffff3f3
+ffffffdffffffffffffffffffcffffffffffbfffffefffffffc7ffffff8fefffffffefff
+fffffffffffffffcffffffffffbffffff7fffffff9fffffc7f9ffffffff7ffffffffffff
+fffffcffffffffffbffffff7fffffffe3fffe3ff7ffffffffbfffffffffffffffffcffff
+ffffff7ffffffbffffffffcfff9ffcfffffffffdfffffffffffffffffcffffffffff7fff
+fffbfffffffff1fc7ffbfffffffffefffffffffffffffffcfffffffffefffffffdffffff
+fffe63fff7ffffffffff7ffffffffffffffffcfffffffffefffffffdffffffffff0fffcf
+ffffffffffbffffffffffffffffcfffffffffefffffffefffffffffcf3ffbfffffffffff
+dffffffffffffffffcfffffffffdfffffffeffffffffe3fc7e7fffffffffffefffffffff
+fffffffcfffffffffdffffffff7fffffff1fff9dfffffffffffff7fffffffffffffffcff
+fffffffdffffffff7ffffff8ffffe3fffffffffffffbfffffffffffffffcfffffffffbff
+ffffffbfffffe7ffffecfffffffffffffdfffffffffffffffcfffffffffbffffffffbfff
+ff1fffffdf1ffffffffffffefffffffffffffffcfffffffff7ffffffffdffff8ffffff3f
+e7ffffffffffff7ffffffffffffffcfffffffff7ffffffffdfffc7fffffefff8ffffffff
+ffffbffffffffffffffcfffffffff7ffffffffefff3ffffff9ffff3fffffffffffdfffff
+fffffffffcffffffffefffffffffeff8fffffff7ffffc7ffffffffffeffffffffffffffc
+ffffffffeffffffffff7c7ffffffcffffff9fffffffffff7fffffffffffffcffffffffef
+fffffffff63fffffffbffffffe3ffffffffffbfffffffffffffcffffffffdffffffffff9
+ffffffff7fffffffcffffffffffdfffffffffffffcffffffffdfffffffffc3fffffffcff
+fffffff1fffffffffefffffffffffffcffffffffbffffffffe3dffffffdbfffffffffe7f
+ffffffff7ffffffffffffcffffffffbffffffff1fdffffffa7ffffffffff8fffffffffbf
+fffffffffffcfffffffebfffffffcffebfffff5ffffffffffff3ffffffffdfffffffffff
+fcfffffffe7ffffffe3ffebffffe27fffffffffffc7fffffffeffffffffffffcfffffffe
+5ffffff1fffd3ffffe9fffffffffffff9ffffffff6fffffffffffcfffffffe3fffff8fff
+fe5ffffc7fffffffffffffe3fffffffb7ffffffffffcfffffffcbffffe7fffff1ffff9ff
+fffffffffffffcfffffffd7ffffffffffcfffffffc7fff71ffffff1fffffffffffffffff
+ffff1ffffff6bffffffffffcfffffffcfffc8fffffff9fffffffffffffffffffffe7ffff
+f93ffffffffffcfffffffcfff07fffffffcffffffffffffffffffffff8fffffe1fffffff
+fffcfffffffdffc3ffffffffefffffffffffffffffffffff3fffff9ffffffffffcffffff
+ffff803fffffffffffffffffffffffffffffffc7ffffeffffffffffcffffff001fffffff
+ffffff800ffffffffffffffffffff9dffff800fffffffcfffff8ffe3fffffffffffc7ff1
+fffffffffffffffffffe27ffc7ff1ffffffcffffc6eeec7fffffffffe377763fffffffff
+ffffffffffcbfe377763fffffcffffbfffffbfffffffffdfffffdfffffffffffffffffff
+00fdfffffdfffffcfffe3bbfbb8fffffffff1ddfddc7fffffffffffffffffff871ddfddc
+7ffffcfffdfffffff7fffffffefffffffbffffffffffffffffffffefffffffbffffcfffa
+eeeeeeebfffffffd77777775ffffffffffffffffffffd77777775ffffcfff7fffffffdff
+fffffbfffffffeffffffffffffffffffffbfffffffeffffcffebfbfbfbfafffffff5fdfd
+fdfd7fffffffffffffffffff5fdfdfdfd7fffcffeffffffffefffffff7ffffffff7fffff
+ffffffffffffff7ffffffff7fffcffceeeeeeeee7fffffe7777777773fffffffffffffff
+fffe7777777773fffcffdfe38dffff7fffffeff1c47fffbffffffffffffffffffefe38ff
+fffbfffcffbfb799bfbfbfffffdfdbcb9fdfdffffffffffffffffffdfd7dfdfdfdfffcff
+bffbb5ffffbfffffdffddfbfffdfffff3cf3fffffffffdffbbfffffdfffcffaeecaceeee
+bfffffd7765737775fffff3cf3fffffffffd7753417775fffcffbffc7dffff8000001ffe
+3f7fffdfffffffffffff800001ffc7b6fffdfffcffbbfaf9fbfbbfffffddfd7cfdfddfff
+fffffffffffffffddfcf96dfddfffcffbffefdffffbfffffdfff7dffffdfffffffffffff
+fffffdffefb6fffdfffcffaeeeeceeeebfffffd7777337775ffffffffffffffffffd7767
+367775fffcffdffc78ffff7fffffeffe383fffbffffffffffffffffffeffc7127ffbfffc
+ffdfbbbfbbbf7fffffefdddfdddfbffffffffffffffffffefdddfdddfbfffcffefffffff
+fefffffff7ffffffff7fffffffffffffffffff7ffffffff7fffcffeeeeeeeeeefffffff7
+777777777fffffffffffffffffff7777777777fffcfff7fffffffdfffffffbfffffffeff
+ffffffffffffffffffbfffffffeffffcfffbfbfbfbfbfffffffdfdfdfdfdffffffffffff
+ffffffffdfdfdfdfdffffcfffdfffffff7fffffffefffffffbffffffffffffffffffffef
+ffffffbffffcfffe6eeeeecfffffffff37777763ffffffffffffffffffffe37777767fff
+fcffffbfffffbfffffffffdfffffddffffffffffffffffffffddfffffdfffffcffffc7bf
+bc7fffffffffe3dfde3e7fffffffffffffffffff3e3dfde3fffffcfffff8ffe3ffffffff
+fffc7ff1ffbffffffffffffffffffeffc7ff1ffffffcffffff001effffffffffff800fff
+cffffffffffffffffff9fff800fffffffcffffffffff7ffffffffffffffffff7ffffffff
+fffffffff7ffff7ffffffffcffffffffff9ffffffffffffffffff9ffffffffffffffffcf
+fffcfffffffffcffffffffffeffffffffffffffffffe7fffffffffffffff3ffffbffffff
+fffcfffffffffff7ffffffffffffffffff9ffffffffffffffcfffff7fffffffffcffffff
+fffff9ffffffffffffffffffe7fffffffffffff3ffffcffffffffffcfffffffffffeffff
+fffffffffffffff9ffffffffffffcfffffbffffffffffcffffffffffff3fffffffffffff
+fffffe3ffffffffffe3ffffe7ffffffffffcffffffffffffdfffffffffffffffffffc7ff
+fffffff1fffffdfffffffffffcffffffffffffe7fffffffffffffffffff87fffffff0fff
+fff3fffffffffffcfffffffffffffbffffffffffffffffffff83ffffe0ffffffefffffff
+fffffcfffffffffffffcfffffffffffffffffffffc00001fffffff9ffffffffffffcffff
+ffffffffff3ffffffffffffffffffffffffffffffffe7ffffffffffffcffffffffffffff
+cffffffffffffffffffffffffffffffff9fffffffffffffcfffffffffffffff7ffffffff
+fffffffffffffffffffffff7fffffffffffffcfffffffffffffff9ffffffffffffffffff
+ffffffffffffcffffffffffffffcfffffffffffffffe7fffffffffffffffffffffffffff
+ff3ffffffffffffffcffffffffffffffff8ffffffffffffffffffffffffffff8ffffffff
+fffffffcfffffffffffffffff3ffffffffffffffffffffffffffe7fffffffffffffffcff
+fffffffffffffffcffffffffffffffffffffffffff9ffffffffffffffffcffffffffffff
+ffffff1ffffffffffffffffffffffffc7ffffffffffffffffcffffffffffffffffffe7ff
+fffffffffffffffffffff3fffffffffffffffffcfffffffffffffffffff8ffffffffffff
+ffffffffff8ffffffffffffffffffcffffffffffffffffffff1ffffffffffffffffffffc
+7ffffffffffffffffffcffffffffffffffffffffe3ffffffffffffffffffe3ffffffffff
+fffffffffcfffffffffffffffffffffc3ffffffffffffffffe1ffffffffffffffffffffc
+ffffffffffffffffffffffc3ffffffffffffffe1fffffffffffffffffffffcffffffffff
+fffffffffffffc3ffffffffffffe1ffffffffffffffffffffffcffffffffffffffffffff
+ffffc0ffffffffff81fffffffffffffffffffffffcffffffffffffffffffffffffff00ff
+ffff807ffffffffffffffffffffffffcffffffffffffffffffffffffffff0000007fffff
+fffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fffcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffff
+fffff1ff9ffffffe7ffff80ffffffffbfffcfffdfffffffffffffffcfffffffffffbffdf
+ffffff7ffffdeffffffffbfffefffffffffffffffffffcfffffffffffbffdfffffff7ffe
+fdffffbffff5fffefffffffffffffffffffcfffffffffffb4f1cc3cd3c734c3dde790cd3
+f5d3ce9989c7fffffffffffffcfffffffffffbb6db6db6db6db6fc1db6bb67eeedb6db6d
+b7fffffffffffffcfffffffffffbb6d86d86db61b6fdde37bb6fe0edc6d33d9fffffffff
+fffffcfffffffffffbb6dbedbedb6fb6fdfdb7bb6feeedb6d7cde7fffffffffffffcffff
+fffffffbb6d9ad9adb66b6fdfdb6bb6fdf6db6e76db7fffffffffffffcfffffffffff113
+2c63c44cb113387e59ccc78e04c86f188ffffffffffffffcffffffffffffffffefffffff
+ffffffffffffffffffeffffffffffffffffffcffffffffffffffffefffffffffffffffff
+ffffffffdffffffffffffffffffcffffffffffffffffc7ffffffffffffffffffffffff9f
+fffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcff
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffcffffffffffffffffffffffffffffffffffffffffffffffffffff
+fffffffffcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffff
+fffffffffffffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffff
+fffffffffffffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffff
+fffffffffffffffffffffffffffffffcffffffffffffffffffffffffffffffffffffffff
+fffffffffffffffffffffc
+end
+%%PageTrailer
+%%Trailer
+%%EOF
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/ifa.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/ifa.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 0 0 2362.500 1537.500 1725 2325 2400 2550 3000 2325
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 0 0 1950.000 818.750 675 2400 1950 2850 3225 2400
+6 225 900 3600 2475
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 900 1125 300 225 900 1125 1200 1350
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2400 1125 300 225 2400 1125 2700 1350
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 525 2175 300 225 525 2175 825 2400
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3240 2179 300 225 3240 2179 3540 2404
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1500 2175 300 225 1500 2175 1800 2400
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 750 1350 525 1950
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 1275 3000 2025
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2325 1350 675 1950
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2550 1350 3150 1950
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 1350 1350 1950
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2325 1350 1575 1875
+4 0 -1 0 0 0 12 0.0000 4 135 225 825 1200 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2250 1200 Xn\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 375 2250 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 270 3075 2250 Ym\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 1500 1275 ...\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1350 2250 Y2\001
+4 0 -1 0 0 0 24 0.0000 4 30 270 2100 2175 ...\001
+-6
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 600 225 1050 225 1050 675 600 675 600 225
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2250 225 2700 225 2700 675 2250 675 2250 225
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 675 825 900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2475 675 2475 900
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 825 2175 1200 2175
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 2625 2175 2925 2175
+4 0 -1 0 0 0 12 0.0000 4 165 225 675 525 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 2325 525 Qn\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/ifa.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/ifa.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/kf.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 225 225 1950 2250
+6 225 225 1950 675
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 525 450 300 225 525 450 825 675
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1650 450 300 225 1650 450 1950 675
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 450 1350 450
+-6
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 525 1575 300 225 525 1575 825 1800
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1650 1575 300 225 1650 1575 1950 1800
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 750 525 1275
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1650 750 1650 1275
+4 0 -1 0 0 0 12 0.0000 4 135 1455 225 2250 Kalman filter model\001
+-6
+4 0 -1 0 0 0 12 0.0000 4 135 210 375 525 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1500 525 X2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 375 1650 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1500 1650 Y2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/kf.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf_input.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/kf_input.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 1 0 7 100 0 -1 0.000 0 1 1 0 3131.250 1462.500 1575 450 1275 1425 1575 2475
+ 0 0 1.00 60.00 120.00
+5 1 0 1 0 7 100 0 -1 0.000 0 1 1 0 4272.606 1559.043 2775 525 2475 1275 2700 2475
+ 0 0 1.00 60.00 120.00
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1875 2550 300 225 1875 2550 2175 2775
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3000 2550 300 225 3000 2550 3300 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1875 1425 300 225 1875 1425 2175 1650
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3000 1425 300 225 3000 1425 3300 1650
+1 1 0 1 0 0 100 0 2 0.000 1 0.0000 1875 375 300 225 1875 375 2175 600
+1 1 0 1 0 0 100 0 2 0.000 1 0.0000 3000 375 300 225 3000 375 3300 600
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1875 1725 1875 2250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3000 1725 3000 2250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2175 1425 2700 1425
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1875 600 1875 1200
+2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3000 600 3000 1200
+4 0 -1 0 0 0 12 0.0000 4 135 225 1725 1500 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2850 1500 X2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1725 2625 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2850 2625 Y2\001
+4 0 0 100 0 0 12 0.0000 4 135 225 1725 450 U1\001
+4 0 0 100 0 0 12 0.0000 4 135 225 2850 450 U2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf_input.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/kf_input.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf_notime.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/kf_notime.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 1 0 2 0 0 100 0 2 0.000 1 0.0000 525 1575 300 225 525 1575 825 1800
+1 1 0 2 0 0 100 0 2 0.000 1 0.0000 1650 1575 300 225 1650 1575 1950 1800
+1 1 0 2 0 0 100 0 -1 0.000 1 0.0000 525 450 300 225 525 450 825 675
+1 1 0 2 0 0 100 0 -1 0.000 1 0.0000 1650 450 300 225 1650 450 1950 675
+2 1 0 2 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 525 750 525 1275
+2 1 0 2 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 1650 750 1650 1275
+2 1 0 2 0 0 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 2.00 120.00 240.00
+ 825 450 1350 450
+4 0 0 100 0 0 20 0.0000 4 195 210 375 525 X\001
+4 0 0 100 0 0 20 0.0000 4 195 210 1500 525 X\001
+4 0 0 100 0 0 20 0.0000 4 195 210 1500 1650 Y\001
+4 0 0 100 0 0 20 0.0000 4 195 210 375 1650 Y\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf_obs_track.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/kf_obs_track.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 1500 335 335 1200 1200 1500 1800
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 2700 335 335 1200 2400 1500 3000
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 3900 335 335 1200 3600 1500 4200
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 5100 335 335 1200 4800 1500 5400
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 5100 335 335 2700 4800 3000 5400
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 3900 335 335 2700 3600 3000 4200
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 2700 335 335 2700 2400 3000 3000
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 1500 335 335 2700 1200 3000 1800
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 1500 2550 1500
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 2700 2550 1650
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 3900 2475 3900
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 5100 2625 4125
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 5100 2475 5100
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 2700 2550 2700
+4 0 0 100 0 0 20 0.0000 4 195 405 1125 2850 dx1\001
+4 0 0 100 0 0 20 0.0000 4 195 270 1125 4050 x2\001
+4 0 0 100 0 0 20 0.0000 4 195 405 1200 5175 dx2\001
+4 0 0 100 0 0 20 0.0000 4 195 270 1200 1650 x1\001
+4 0 0 100 0 0 20 0.0000 4 195 270 2700 1650 x1\001
+4 0 0 100 0 0 20 0.0000 4 195 405 2700 2850 dx1\001
+4 0 0 100 0 0 20 0.0000 4 195 270 2700 4050 x2\001
+4 0 0 100 0 0 20 0.0000 4 195 405 2700 5250 dx2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kf_scalar_track.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/kf_scalar_track.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 2726.786 5448.214 1125 4125 675 5775 1050 6675
+ 0 0 4.00 120.00 240.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 6809.923 4980.705 1050 1725 225 5625 975 8100
+ 0 0 4.00 120.00 240.00
+5 1 0 2 0 7 100 0 -1 0.000 0 0 1 0 1778.571 5400.000 3225 3900 3750 6075 3225 6900
+ 0 0 4.00 120.00 240.00
+5 1 0 2 0 7 100 0 -1 0.000 0 0 1 0 -75.000 4837.500 3225 1500 4575 5475 3225 8175
+ 0 0 4.00 120.00 240.00
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 1500 335 335 1200 1200 1500 1800
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 2700 335 335 1200 2400 1500 3000
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 3900 335 335 1200 3600 1500 4200
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 1350 5100 335 335 1200 4800 1500 5400
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 5100 335 335 2700 4800 3000 5400
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 3900 335 335 2700 3600 3000 4200
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 2700 335 335 2700 2400 3000 3000
+1 4 0 2 0 7 100 0 -1 0.000 1 0.0000 2850 1500 335 335 2700 1200 3000 1800
+1 4 0 2 0 0 100 0 2 0.000 1 0.0000 1350 6900 335 335 1200 6600 1500 7200
+1 4 0 2 0 0 100 0 2 0.000 1 0.0000 2850 6900 335 335 2700 6600 3000 7200
+1 4 0 2 0 0 100 0 2 0.000 1 0.0000 2850 8100 335 335 2700 7800 3000 8400
+1 4 0 2 0 0 100 0 2 0.000 1 0.0000 1350 8100 335 335 1200 7800 1500 8400
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 1500 2550 1500
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 2700 2550 1650
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 3900 2475 3900
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 5100 2625 4125
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 5100 2475 5100
+2 1 0 2 0 0 100 0 2 0.000 0 0 -1 1 0 2
+ 0 0 4.00 120.00 240.00
+ 1725 2700 2550 2700
+4 0 0 100 0 0 20 0.0000 4 195 405 1125 2850 dx1\001
+4 0 0 100 0 0 20 0.0000 4 195 270 1125 4050 x2\001
+4 0 0 100 0 0 20 0.0000 4 195 405 1200 5175 dx2\001
+4 0 0 100 0 0 20 0.0000 4 195 270 1200 1650 x1\001
+4 0 0 100 0 0 20 0.0000 4 195 270 2700 1650 x1\001
+4 0 0 100 0 0 20 0.0000 4 195 405 2700 2850 dx1\001
+4 0 0 100 0 0 20 0.0000 4 195 270 2700 4050 x2\001
+4 0 0 100 0 0 20 0.0000 4 195 405 2700 5250 dx2\001
+4 0 0 100 0 0 20 0.0000 4 255 270 1200 6975 y2\001
+4 0 0 100 0 0 20 0.0000 4 255 270 1200 8175 y1\001
+4 0 0 100 0 0 20 0.0000 4 255 270 2700 7050 y2\001
+4 0 0 100 0 0 20 0.0000 4 255 270 2700 8175 y1\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/kfhead.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/kfhead.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mathbymatlab.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mathbymatlab.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mcmc_accept.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mcmc_accept.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mcmc_post.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mcmc_post.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mfa.eps
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/mfa.eps Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,347 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: (ImageMagick)
+%%Title: (mfa.eps)
+%%CreationDate: (Tue Nov 16 19:52:06 2004)
+%%BoundingBox: 0 0 126 151
+%%DocumentData: Clean7Bit
+%%LanguageLevel: 1
+%%Pages: 1
+%%EndComments
+
+%%BeginDefaults
+%%EndDefaults
+
+%%BeginProlog
+%
+% Display a color image. The image is displayed in color on
+% Postscript viewers or printers that support color, otherwise
+% it is displayed as grayscale.
+%
+/DirectClassPacket
+{
+ %
+ % Get a DirectClass packet.
+ %
+ % Parameters:
+ % red.
+ % green.
+ % blue.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/DirectClassImage
+{
+ %
+ % Display a DirectClass image.
+ %
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { DirectClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayDirectClassPacket } image
+ } ifelse
+} bind def
+
+/GrayDirectClassPacket
+{
+ %
+ % Get a DirectClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % red
+ % green
+ % blue
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile color_packet readhexstring pop pop
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/GrayPseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet; convert to grayscale.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ color_packet 0 get 0.299 mul
+ color_packet 1 get 0.587 mul add
+ color_packet 2 get 0.114 mul add
+ cvi
+ /gray_packet exch def
+ compression 0 eq
+ {
+ /number_pixels 1 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add def
+ } ifelse
+ 0 1 number_pixels 1 sub
+ {
+ pixels exch gray_packet put
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassPacket
+{
+ %
+ % Get a PseudoClass packet.
+ %
+ % Parameters:
+ % index: index into the colormap.
+ % length: number of pixels minus one of this color (optional).
+ %
+ currentfile byte readhexstring pop 0 get
+ /offset exch 3 mul def
+ /color_packet colormap offset 3 getinterval def
+ compression 0 eq
+ {
+ /number_pixels 3 def
+ }
+ {
+ currentfile byte readhexstring pop 0 get
+ /number_pixels exch 1 add 3 mul def
+ } ifelse
+ 0 3 number_pixels 1 sub
+ {
+ pixels exch color_packet putinterval
+ } for
+ pixels 0 number_pixels getinterval
+} bind def
+
+/PseudoClassImage
+{
+ %
+ % Display a PseudoClass image.
+ %
+ % Parameters:
+ % class: 0-PseudoClass or 1-Grayscale.
+ %
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ class 0 gt
+ {
+ currentfile buffer readline pop
+ token pop /depth exch def pop
+ /grays columns 8 add depth sub depth mul 8 idiv string def
+ columns rows depth
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { currentfile grays readhexstring pop } image
+ }
+ {
+ %
+ % Parameters:
+ % colors: number of colors in the colormap.
+ % colormap: red, green, blue color packets.
+ %
+ currentfile buffer readline pop
+ token pop /colors exch def pop
+ /colors colors 3 mul def
+ /colormap colors string def
+ currentfile colormap readhexstring pop pop
+ systemdict /colorimage known
+ {
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { PseudoClassPacket } false 3 colorimage
+ }
+ {
+ %
+ % No colorimage operator; convert to grayscale.
+ %
+ columns rows 8
+ [
+ columns 0 0
+ rows neg 0 rows
+ ]
+ { GrayPseudoClassPacket } image
+ } ifelse
+ } ifelse
+} bind def
+
+/DisplayImage
+{
+ %
+ % Display a DirectClass or PseudoClass image.
+ %
+ % Parameters:
+ % x & y translation.
+ % x & y scale.
+ % label pointsize.
+ % image label.
+ % image columns & rows.
+ % class: 0-DirectClass or 1-PseudoClass.
+ % compression: 0-none or 1-RunlengthEncoded.
+ % hex color packets.
+ %
+ gsave
+ /buffer 512 string def
+ /byte 1 string def
+ /color_packet 3 string def
+ /pixels 768 string def
+
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ x y translate
+ currentfile buffer readline pop
+ token pop /x exch def
+ token pop /y exch def pop
+ currentfile buffer readline pop
+ token pop /pointsize exch def pop
+ /Times-Roman findfont pointsize scalefont setfont
+ x y scale
+ currentfile buffer readline pop
+ token pop /columns exch def
+ token pop /rows exch def pop
+ currentfile buffer readline pop
+ token pop /class exch def pop
+ currentfile buffer readline pop
+ token pop /compression exch def pop
+ class 0 gt { PseudoClassImage } { DirectClassImage } ifelse
+ grestore
+} bind def
+%%EndProlog
+%%Page: 1 1
+%%PageBoundingBox: 0 0 126 151
+userdict begin
+DisplayImage
+0 0
+126 151
+12.000000
+126 151
+1
+1
+1
+1
+fffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffff
+fffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffffffffffff
+fffffffcfffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffc
+ff8000001fffffffffffff800ffffffcffbfffffdffffffffffffc7ff1fffffcffbfffff
+dfffffffffffe3fffe3ffffcffbfffffdfffffffffffdfffffdffffcffbfffffdfffffff
+ffff3fffffe7fffcffbfffffdffffffffffefffffffbfffcffbfffffdffffffffffdffff
+fffdfffcffbfffffdffffffffffbfffffffefffcffbfffffdffffffffff7ffffffff7ffc
+ffbfffffdffffffffff7ffffffff7ffcffbfffffdfffffffffefffffffffbffcffbf0fff
+dfffffffffeff18fffffbffcffbe67ffdfffffffffdffbdfffffdffcffbef7ffdfffffff
+ffdffdbfffffdffcffbdfbffdfffffffffdffc7fffffdffcffbdfbffdfffffffffdffe7f
+ffffdffcffbdfbffdfffffffffdffd3fffffdffcffbef7ffdfffffffffdffdbfffffdffc
+ffbe67ffdfffffffffdffbdfffffdffcffbf0fffdfffffffffeff18fffffbffcffbfcfff
+dfffffffffefffffffffbffcffbff3ffdffffffffff7ffffffff7ffcffbfffffdfffffff
+fff7ffffffff7ffcffbfffffdffffffffffbfffffffefffcffbfffffdffffffffffdffff
+fffdfffcffbfffffdffffffffffefffffffbfffcffbfffffdfffffffffff3fffffe7fffc
+ffbfffffdfffffffffffdfffffdffffcffbfffffdfffffffffffe3fffe3ffffcffbfffff
+dffffffffffffc7ff1fffffcff8000001fffffffffffff800ffffffcfffffffdffffffff
+ffffff7ffffffffcfffffffdffffffffffffff7ffffffffcfffffffefffffffffffffeff
+fffffffcffffffff7ffffffffffffdfffffffffcffffffffbffffffffffffbfffffffffc
+ffffffffbffffffffffffbfffffffffcffffffffdffffffffffff7fffffffffcffffffff
+efffffffffffeffffffffffcffffffffefffffffffffeffffffffffcfffffffff7ffffff
+ffffdffffffffffcfffffffffbffffffffffbffffffffffcfffffffffdffffffffff7fff
+fffffffcfffffffffdffffffffff7ffffffffffcfffffffffefffffffffefffffffffffc
+ffffffffff7ffffffffdfffffffffffcffffffffff7ffffffffdfffffffffffcffffffff
+ffbffffffffbfffffffffffcffffffffffdffffffff7fffffffffffcffffffffffefffff
+ffeffffffffffffcffffffffffefffffffeffffffffffffcfffffffffff7ffffffdfffff
+fffffffcfffffffffffbffffffbffffffffffffcfffffffffffbffffffbffffffffffffc
+fffffffffffdffffff7ffffffffffffcfffffffffffefffffefffffffffffffcffffffff
+ffff5ffff5fffffffffffffcffffffffffff5ffff5fffffffffffffcfffffffffffeafff
+eafffffffffffffcffffffffffff4fffe5fffffffffffffcffffffffffff8fffd3ffffff
+fffffffcffffffffffffc7ffc7fffffffffffffcffffffffffffe7ffcffffffffffffffc
+fffffffffffff3ff9ffffffffffffffcfffffffffffffbffbffffffffffffffcffffffff
+fffffc007ffffffffffffffcffffffffffffe3ff8ffffffffffffffcffffffffffff1bbb
+b1fffffffffffffcfffffffffffefffffefffffffffffffcfffffffffff8eefeee3fffff
+fffffffcfffffffffff7ffffffdffffffffffffcffffffffffebbbbbbbaffffffffffffc
+ffffffffffdffffffff7fffffffffffcffffffffffafefefefebfffffffffffcffffffff
+ffbffffffffbfffffffffffcffffffffff3bbbbbbbb9fffffffffffcffffffffff7f8e3f
+fffdfffffffffffcfffffffffefede7efefefffffffffffcfffffffffeffeefffffeffff
+fffffffcfffffffffebbb2bbbbbafffffffffffcfffffffffefff1fffffefffffffffffc
+fffffffffeefebefefeefffffffffffcfffffffffefffbfffffefffffffffffcffffffff
+febbbbbbbbbafffffffffffcffffffffff7ff1fffffdfffffffffffcffffffffff7eeefe
+eefdfffffffffffcffffffffffbffffffffbfffffffffffcffffffffffbbbbbbbbbbffff
+fffffffcffffffffffdffffffff7fffffffffffcffffffffffefefefefeffffffffffffc
+fffffffffff7ffffffdffffffffffffcfffffffffff9bbbbbb3ffffffffffffcffffffff
+fffefffffefffffffffffffcffffffffffff1efef1fffffffffffffcffffffffffffe3ff
+8ffffffffffffffcfffffffffffffc007ffffffffffffffcffffffffffffffffffffffff
+fffffffcfffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffc
+fffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffff
+fffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffffffffffff
+fffffffcfffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffc
+fffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffff
+fffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffffffffffff
+fffffffcfffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffc
+fffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffff3f97ffffff
+ffee03effffffffcffffff9f3fffffffffdf7beffffffffcffffff9f3ffbffffffdf7fd7
+fffffffcffffffaea6402533f98f77d7c7fffffcffffffaeb75bb66df6df07bbb7fffffc
+ffffffb5b7bbb6e1f6df77839ffffffcffffffb5b7bbb6eff6df7fbbe7fffffcffffffbb
+b75bb6e6f6df7f7db7fffffcffffff1b024cc871f9de1e388ffffffcffffffffffffffff
+fffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffffffffffff
+fffffffcfffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffc
+fffffffffffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffff
+fffffffffffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffff
+fffffffffffffffcfffffffffffffffffffffffffffffffcffffffffffffffffffffffff
+fffffffc
+end
+%%PageTrailer
+%%Trailer
+%%EOF
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mfa.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/mfa.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 225 225 1800 1650
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1500 450 300 225 1500 450 1800 675
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 975 1425 300 225 975 1425 1275 1650
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 225 225 600 225 600 675 225 675 225 225
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 525 675 900 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1425 675 1050 1200
+-6
+4 0 -1 0 0 0 12 0.0000 4 135 135 825 1500 Y\001
+4 0 -1 0 0 0 12 0.0000 4 165 135 300 525 Q\001
+4 0 -1 0 0 0 12 0.0000 4 135 135 1350 525 X\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mfa.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mfa.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mixexp.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/mixexp.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 525 300 1575 2925
+5 1 0 1 -1 7 0 0 -1 0.000 0 1 1 0 2055.000 1650.000 975 675 600 1650 975 2625
+ 0 0 1.00 60.00 120.00
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1275 525 300 225 1275 525 1575 750
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1275 2700 300 225 1275 2700 1575 2925
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1050 1350 1425 1350 1425 1800 1050 1800 1050 1350
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1275 825 1275 1350
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1275 1800 1275 2475
+-6
+4 0 -1 0 0 0 12 0.0000 4 180 1425 450 3300 Mixture of Experts\001
+4 0 -1 0 0 0 12 0.0000 4 135 120 1200 600 X\001
+4 0 -1 0 0 0 12 0.0000 4 165 135 1125 1650 Q\001
+4 0 -1 0 0 0 12 0.0000 4 135 135 1200 2775 Y\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mixexp.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mixexp.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mixexp_after.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mixexp_after.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mixexp_before.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mixexp_before.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/mixexp_data.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/mixexp_data.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/model_select.png
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/model_select.png has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/qmr.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/qmr.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 750 2700 309 309 750 2700 1050 2775
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 3225 2700 309 309 3225 2700 3525 2775
+1 3 0 3 0 0 100 0 5 0.000 1 0.0000 4500 2700 309 309 4500 2700 4800 2775
+1 3 0 3 0 0 100 0 -1 0.000 1 0.0000 1950 2700 309 309 1950 2700 2025 3000
+1 3 0 3 0 0 100 0 -1 0.000 1 0.0000 5775 2700 309 309 5775 2700 5850 3000
+1 3 0 3 0 0 100 0 -1 0.000 1 0.0000 4125 1200 309 309 4125 1200 4200 1500
+1 3 0 3 0 0 100 0 -1 0.000 1 0.0000 3000 1200 309 309 3000 1200 3075 1500
+1 3 0 3 0 0 100 0 -1 0.000 1 0.0000 1875 1200 309 309 1875 1200 1950 1500
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 1 2
+ 0 0 3.00 180.00 360.00
+ 0 0 3.00 180.00 360.00
+ 450 3750 6150 3750
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 1 2
+ 0 0 3.00 180.00 360.00
+ 0 0 3.00 180.00 360.00
+ 1350 525 4650 525
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 1725 1500 900 2475
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 2850 1500 1050 2475
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 2925 1500 2100 2400
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 3975 1500 2100 2400
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 3075 1575 3150 2325
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 3150 1575 4200 2400
+2 1 0 3 0 7 100 0 -1 0.000 0 0 7 1 0 2
+ 0 0 3.00 180.00 360.00
+ 4200 1500 5475 2475
+4 0 0 100 0 0 24 0.0000 4 255 720 1950 4200 4000\001
+4 0 0 100 0 0 24 0.0000 4 330 1575 2850 4200 Symptoms\001
+4 0 0 100 0 0 24 0.0000 4 255 540 2100 375 600\001
+4 0 0 100 0 0 24 0.0000 4 255 1290 2775 375 Diseases\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/qmr.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/qmr.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/qmr.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/qmr.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/qmr.rnd.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/qmr.rnd.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/rainer_dbn.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/rainer_dbn.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/rainer_tied.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/rainer_tied.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 3196.875 3000.000 1275 1875 975 3150 1275 4125
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 5596.875 3000.000 3675 1875 3375 3150 3675 4125
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 0 1 0 3849.948 3549.006 2475 3375 3075 2400 4500 2325
+ 1 1 1.00 60.00 120.00
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 1500 1875 237 237 1500 1875 1575 2100
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 1500 2775 237 237 1500 2775 1575 3000
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2400 2325 237 237 2400 2325 2475 2550
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2325 3600 237 237 2325 3600 2400 3825
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 1500 4275 237 237 1500 4275 1575 4500
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 3900 1875 237 237 3900 1875 3975 2100
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 3900 2775 237 237 3900 2775 3975 3000
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4800 2325 237 237 4800 2325 4875 2550
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4725 3600 237 237 4725 3600 4800 3825
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 3900 4275 237 237 3900 4275 3975 4500
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 2625 1875 1725 300 2625 1875 4350 2175
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 2625 4275 1800 300 2625 4275 4425 4575
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 3375 3600 1725 300 3375 3600 5100 3900
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 1500 2775 375 300 1500 2775 1875 3075
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 3900 2775 375 300 3900 2775 4275 3075
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 4800 2325 375 300 4800 2325 5175 2625
+1 1 1 2 0 7 100 0 -1 6.000 1 0.0000 2400 2325 375 225 2400 2325 2775 2550
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1725 1950 2175 2250
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 2400 1725 2700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2400 2550 2400 3375
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1575 3000 2100 3450
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 3
+ 1 1 1.00 60.00 120.00
+ 1725 1950 2175 3375 2175 3375
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 3750 1650 4125
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4125 1950 4575 2250
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4575 2400 4125 2700
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4800 2550 4800 3375
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3975 3000 4500 3450
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 3
+ 1 1 1.00 60.00 120.00
+ 4125 1950 4575 3375 4575 3375
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4575 3750 4050 4125
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2700 2325 3675 2700
+4 0 0 100 0 2 12 0.0000 4 135 90 1425 1950 1\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2325 2400 2\001
+4 0 0 100 0 2 12 0.0000 4 135 90 1425 2850 3\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2250 3675 4\001
+4 0 0 100 0 2 12 0.0000 4 135 90 1425 4350 5\001
+4 0 0 100 0 2 12 0.0000 4 135 90 3750 1950 6\001
+4 0 0 100 0 2 12 0.0000 4 135 90 4650 2400 7\001
+4 0 0 100 0 2 12 0.0000 4 135 90 3750 2850 8\001
+4 0 0 100 0 2 12 0.0000 4 135 90 4650 3675 9\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3750 4350 10\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2475 2700 E2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 1350 4725 E5\001
+4 0 0 100 0 2 12 0.0000 4 135 210 1725 3900 E4\001
+4 0 0 100 0 2 12 0.0000 4 135 210 1350 3300 E3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 1350 2325 E1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3750 3225 E7\001
+4 0 0 100 0 2 12 0.0000 4 135 210 4800 2850 E6\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/rainer_tied.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/rainer_tied.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sar.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/sar.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 375 225 2175 2325
+6 375 1350 2100 1800
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 675 1575 300 225 675 1575 975 1800
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1800 1575 300 225 1800 1575 2100 1800
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 975 1575 1500 1575
+-6
+6 450 225 1950 675
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 450 225 825 225 825 675 450 675 450 225
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1575 225 1950 225 1950 675 1575 675 1575 225
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 450 1575 450
+-6
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 675 675 675 1350
+2 1 0 1 -1 0 0 0 2 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1800 675 1800 1350
+2 1 1 1 -1 0 0 0 2 4.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 975 1425 1500 525
+4 0 -1 0 0 0 12 0.0000 4 180 1530 600 2250 Switching AR model\001
+-6
+4 0 -1 0 0 0 12 0.0000 4 165 225 525 525 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1650 525 Q2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 525 1650 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1650 1650 Y2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sar.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/sar.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/skf.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/skf.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+#FIG 3.1
+Landscape
+Center
+Inches
+1200 2
+6 300 300 2550 3375
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 3718.581 1734.122 1725 600 1425 1725 1575 2550
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 2652.330 1587.076 600 600 375 1575 525 2400
+ 0 0 1.00 60.00 120.00
+6 1650 1200 2250 2775
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1950 2550 300 225 1950 2550 2250 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1950 1425 300 225 1950 1425 2250 1650
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 1725 1950 2250
+-6
+6 675 300 2175 750
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 300 1050 300 1050 750 675 750 675 300
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1800 300 2175 300 2175 750 1800 750 1800 300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 525 1800 525
+-6
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 825 2550 300 225 825 2550 1125 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 825 1425 300 225 825 1425 1125 1650
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 750 825 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 750 1950 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 1425 1650 1425
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1725 825 2250
+4 0 -1 0 0 0 12 0.0000 4 180 1740 750 3300 Switching Kalman filter\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 750 600 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1875 600 Q2\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 675 1500 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 210 1800 1500 X2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 675 2625 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1800 2625 Y2\001
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/skf.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/skf.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/skf3.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/skf3.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 300 300 3750 3450
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 2100.000 1950.000 675 975 375 1875 675 2925
+ 1 1 2.00 120.00 240.00
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 3300.000 1950.000 1875 975 1575 1875 1875 2925
+ 1 1 2.00 120.00 240.00
+5 1 0 2 0 0 50 0 -1 0.000 0 1 1 0 4500.000 1950.000 3075 975 2775 1875 3075 2925
+ 1 1 2.00 120.00 240.00
+1 3 0 2 0 0 50 0 -1 0.000 1 0.0000 975 1875 335 335 975 1875 1125 2175
+1 3 0 2 0 0 50 0 -1 0.000 1 0.0000 3375 1875 335 335 3375 1875 3525 2175
+1 3 0 2 0 0 50 0 20 0.000 1 0.0000 975 3075 335 335 975 3075 1125 3375
+1 3 0 2 0 0 50 0 20 0.000 1 0.0000 2175 3075 335 335 2175 3075 2325 3375
+1 3 0 2 0 0 50 0 20 0.000 1 0.0000 3375 3075 335 335 3375 3075 3525 3375
+1 3 0 2 0 0 50 0 -1 0.000 1 0.0000 2175 1875 335 335 2175 1875 2325 2175
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 675 375 1275 375 1275 975 675 975 675 375
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 1875 375 2475 375 2475 975 1875 975 1875 375
+2 2 0 2 0 0 50 0 -1 0.000 0 0 7 0 0 5
+ 3075 375 3675 375 3675 975 3075 975 3075 375
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 975 975 975 1575
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 975 2175 975 2775
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 2175 975 2175 1575
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 2175 2175 2175 2775
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 3375 975 3375 1575
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 3375 2175 3375 2775
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 1275 1875 1875 1875
+2 1 0 2 0 0 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 2.00 120.00 240.00
+ 2475 1875 3075 1875
+4 0 0 50 0 0 24 0.0000 4 255 225 750 750 Z\001
+4 0 0 50 0 0 24 0.0000 4 255 225 1950 750 Z\001
+4 0 0 50 0 0 24 0.0000 4 255 180 2175 900 2\001
+4 0 0 50 0 0 24 0.0000 4 255 225 3150 750 Z\001
+4 0 0 50 0 0 24 0.0000 4 255 180 3375 900 3\001
+4 0 0 50 0 0 24 0.0000 4 255 270 825 2025 X\001
+4 0 0 50 0 0 24 0.0000 4 255 180 1050 2175 1\001
+4 0 0 50 0 0 24 0.0000 4 255 180 2250 2175 2\001
+4 0 0 50 0 0 24 0.0000 4 255 180 3450 2175 3\001
+4 0 0 50 0 0 24 0.0000 4 255 270 3225 2025 X\001
+4 0 0 50 0 0 24 0.0000 4 255 270 2025 2025 X\001
+4 0 7 50 0 0 24 0.0000 4 255 240 825 3150 Y\001
+4 0 7 50 0 0 24 0.0000 4 255 240 2025 3150 Y\001
+4 0 7 50 0 0 24 0.0000 4 255 240 3225 3150 Y\001
+4 0 7 50 0 0 24 0.0000 4 255 180 1050 3300 1\001
+4 0 7 50 0 0 24 0.0000 4 255 180 2175 3375 2\001
+4 0 7 50 0 0 24 0.0000 4 255 180 3375 3300 3\001
+4 0 0 50 0 0 24 0.0000 4 255 180 975 900 1\001
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/skf3_nobold.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/skf3_nobold.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 2652.330 1587.076 600 600 375 1575 525 2400
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 3718.581 1734.122 1725 600 1425 1725 1575 2550
+ 0 0 1.00 60.00 120.00
+5 1 0 1 -1 -1 0 0 -1 0.000 0 1 1 0 4843.581 1734.122 2850 600 2550 1725 2700 2550
+ 0 0 1.00 60.00 120.00
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 825 2550 300 225 825 2550 1125 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 825 1425 300 225 825 1425 1125 1650
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1950 2550 300 225 1950 2550 2250 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1950 1425 300 225 1950 1425 2250 1650
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3075 1425 300 225 3075 1425 3375 1650
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3075 2550 300 225 3075 2550 3375 2775
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 750 825 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1725 825 2250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 750 1950 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 1425 1650 1425
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 750 3075 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2250 1425 2775 1425
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 300 1050 300 1050 750 675 750 675 300
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1800 300 2175 300 2175 750 1800 750 1800 300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 525 1800 525
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 1725 1950 2250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 1725 3075 2250
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2850 300 3225 300 3225 750 2850 750 2850 300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2100 525 2850 525
+4 0 -1 0 0 0 12 0.0000 4 135 225 675 1500 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 675 2625 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1800 1500 X2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1800 2625 Y2\001
+4 0 0 50 0 0 12 0.0000 4 135 225 2850 1500 X3\001
+4 0 0 0 0 0 12 0.0000 4 135 225 2925 2625 Y3\001
+4 0 0 50 0 0 12 0.0000 4 135 210 750 600 Z1\001
+4 0 0 50 0 0 12 0.0000 4 135 210 1875 600 Z2\001
+4 0 0 50 0 0 12 0.0000 4 135 210 2925 600 Z3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/skf3_nosolid.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/skf3_nosolid.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 7 50 0 -1 0.000 0 1 1 0 2652.330 1587.076 600 600 375 1575 525 2400
+ 1 1 2.00 120.00 240.00
+5 1 0 2 0 7 50 0 -1 0.000 0 1 1 0 3718.581 1734.122 1725 600 1425 1725 1575 2550
+ 1 1 2.00 120.00 240.00
+5 1 0 2 0 7 50 0 -1 0.000 0 1 1 0 4843.581 1734.122 2850 600 2550 1725 2700 2550
+ 1 1 2.00 120.00 240.00
+1 1 0 2 0 7 50 0 -1 0.000 1 0.0000 1950 1425 300 225 1950 1425 2250 1650
+1 1 0 2 0 7 50 0 -1 0.000 1 0.0000 825 1425 300 225 825 1425 1125 1650
+1 1 0 2 0 0 50 0 7 0.000 1 0.0000 825 2550 300 225 825 2550 1125 2775
+1 1 0 2 0 0 50 0 7 0.000 1 0.0000 3075 2550 300 225 3075 2550 3375 2775
+1 1 0 2 0 0 50 0 7 0.000 1 0.0000 1950 2550 300 225 1950 2550 2250 2775
+1 1 0 2 0 7 50 0 -1 0.000 1 0.0000 3075 1425 300 225 3075 1425 3375 1650
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 825 1725 825 2250
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 1125 1425 1650 1425
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 1800 300 2175 300 2175 750 1800 750 1800 300
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 2100 525 2850 525
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 2850 300 3225 300 3225 750 2850 750 2850 300
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 1950 1725 1950 2250
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 2250 1425 2775 1425
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 1950 750 1950 1200
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 3075 750 3075 1200
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 3075 1725 3075 2250
+2 1 0 2 0 7 50 0 -1 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 825 750 825 1200
+2 2 0 2 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 675 300 1050 300 1050 750 675 750 675 300
+2 1 0 2 0 0 50 0 7 0.000 0 0 7 1 0 2
+ 1 1 2.00 120.00 240.00
+ 1050 525 1800 525
+4 0 0 50 0 2 12 0.0000 4 135 225 675 2625 Y1\001
+4 0 0 50 0 2 12 0.0000 4 135 225 2925 2625 Y3\001
+4 0 0 50 0 2 12 0.0000 4 135 225 675 1500 X1\001
+4 0 0 50 0 2 12 0.0000 4 135 225 1800 1500 X2\001
+4 0 0 50 0 2 12 0.0000 4 135 225 2850 1500 X3\001
+4 0 0 50 0 2 12 0.0000 4 135 210 750 600 Z1\001
+4 0 0 50 0 2 12 0.0000 4 135 210 1875 600 Z2\001
+4 0 0 50 0 2 12 0.0000 4 135 210 2925 600 Z3\001
+4 0 0 50 0 2 12 0.0000 4 135 225 1875 2625 Y2\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/skf3_polytree.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/skf3_polytree.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 825 2550 300 225 825 2550 1125 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 825 1425 300 225 825 1425 1125 1650
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 1950 2550 300 225 1950 2550 2250 2775
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1950 1425 300 225 1950 1425 2250 1650
+1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3075 1425 300 225 3075 1425 3375 1650
+1 1 0 1 -1 0 0 0 2 0.000 1 0.0000 3075 2550 300 225 3075 2550 3375 2775
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 750 825 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 825 1725 825 2250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 750 1950 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1125 1425 1650 1425
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 750 3075 1200
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2250 1425 2775 1425
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 675 300 1050 300 1050 750 675 750 675 300
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 1800 300 2175 300 2175 750 1800 750 1800 300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1050 525 1800 525
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 1725 1950 2250
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 3075 1725 3075 2250
+2 2 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 5
+ 2850 300 3225 300 3225 750 2850 750 2850 300
+2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2100 525 2850 525
+4 0 -1 0 0 0 12 0.0000 4 165 225 750 600 Q1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 675 1500 X1\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 675 2625 Y1\001
+4 0 -1 0 0 0 12 0.0000 4 165 225 1875 600 Q2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1800 1500 X2\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 1800 2625 Y2\001
+4 0 0 50 0 0 12 0.0000 4 165 225 2925 600 Q3\001
+4 0 0 50 0 0 12 0.0000 4 135 225 2850 1500 X3\001
+4 0 0 0 0 0 12 0.0000 4 135 225 2925 2625 Y3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,73 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 3675 1275 6000 3450
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 4125 2400 450 225 4125 2400 4575 2625
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 4800 1500 450 225 4800 1500 5250 1725
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 5550 2400 450 225 5550 2400 6000 2625
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 4875 3225 450 225 4875 3225 5325 3450
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4650 1725 4200 2175
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5025 1725 5400 2175
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4350 2625 4800 3000
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 5475 2700 5100 3000
+4 0 -1 0 0 0 12 0.0000 4 180 525 4575 1575 Cloudy\001
+4 0 -1 0 0 0 12 0.0000 4 180 675 3825 2475 Sprinkler\001
+4 0 -1 0 0 0 12 0.0000 4 135 345 5400 2475 Rain\001
+4 0 -1 0 0 0 12 0.0000 4 135 795 4500 3300 WetGrass\001
+-6
+6 3975 3900 6000 5775
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 3975 4275 6000 4275
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 1
+ 4425 3975
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 4485 3960 4485 5730
+4 0 -1 0 0 0 12 0.0000 4 135 300 4050 4500 F F\001
+4 0 -1 0 0 0 12 0.0000 4 135 810 4650 4500 1.0 0.0\001
+4 0 -1 0 0 0 12 0.0000 4 135 300 4050 4875 T F\001
+4 0 -1 0 0 0 12 0.0000 4 135 300 4050 5250 F T\001
+4 0 -1 0 0 0 12 0.0000 4 135 345 4050 5625 T T\001
+4 0 -1 0 0 0 12 0.0000 4 135 855 4650 4875 0.1 0.9\001
+4 0 -1 0 0 0 12 0.0000 4 135 855 4650 5250 0.1 0.9\001
+4 0 -1 0 0 0 12 0.0000 4 135 900 4650 5625 0.01 0.99\001
+4 0 -1 0 0 0 12 0.0000 4 180 1785 4125 4230 S R P(W=F) P(W=T)\001
+-6
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 4125 525 5700 525
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 1650 2625 3375 2625
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 2025 2325 2025 3300
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 6525 2700 8250 2700
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 0 0 2
+ 6900 2400 6900 3450
+4 0 -1 0 0 0 12 0.0000 4 180 1260 4290 450 P(C=F) P(C=T)\001
+4 0 -1 0 0 0 12 0.0000 4 135 855 4425 825 0.5 0.5\001
+4 0 -1 0 0 0 12 0.0000 4 135 105 1725 2925 F\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2175 2925 0.5\001
+4 0 -1 0 0 0 12 0.0000 4 135 225 2775 2925 0.5\001
+4 0 -1 0 0 0 12 0.0000 4 135 105 1725 3225 T\001
+4 0 -1 0 0 0 12 0.0000 4 135 900 2175 3225 0.9 0.1\001
+4 0 -1 0 0 0 12 0.0000 4 135 120 6600 2625 C\001
+4 0 -1 0 0 0 12 0.0000 4 135 105 6600 3000 F\001
+4 0 -1 0 0 0 12 0.0000 4 135 105 6525 3375 T\001
+4 0 -1 0 0 0 12 0.0000 4 135 945 7050 3375 0.2 0.8\001
+4 0 -1 0 0 0 12 0.0000 4 135 945 7050 3000 0.8 0.2\001
+4 0 0 100 0 0 12 0.0000 4 135 120 1650 2550 C\001
+4 0 0 100 0 0 12 0.0000 4 180 1185 2100 2550 P(S=F) P(S=T)\001
+4 0 0 100 0 0 12 0.0000 4 180 1215 6975 2625 P(R=F) P(R=T)\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.noparams.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler.noparams.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 600 300 2925 2475
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 1050 1425 450 225 1050 1425 1500 1650
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 1725 525 450 225 1725 525 2175 750
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 2475 1425 450 225 2475 1425 2925 1650
+1 1 0 1 -1 7 0 0 -1 0.000 1 0.0000 1800 2250 450 225 1800 2250 2250 2475
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1575 750 1125 1200
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1950 750 2325 1200
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 1275 1650 1725 2025
+2 1 0 1 -1 7 0 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2400 1725 2025 2025
+4 0 -1 0 0 0 12 0.0000 4 180 525 1500 600 Cloudy\001
+4 0 -1 0 0 0 12 0.0000 4 180 675 750 1500 Sprinkler\001
+4 0 -1 0 0 0 12 0.0000 4 135 345 2325 1500 Rain\001
+4 0 -1 0 0 0 12 0.0000 4 135 795 1425 2325 WetGrass\001
+-6
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler_bar.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler_bar.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler_bar.jpg
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler_bar.jpg has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler_bar.png
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/sprinkler_bar.png has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3.cts.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/water3.cts.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,266 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 3229.069 1880.389 1950 3450 1275 1350 1875 375
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 1 1 0 4200.000 6900.000 1950 5700 1650 6900 1950 8100
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 1 1 0 3787.500 6900.000 1950 4950 1125 7200 1950 8850
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 7232.812 1950.000 5775 3450 5175 1575 5775 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 1 1 0 7387.500 6975.000 5775 5700 5400 7500 5775 8250
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 1 1 0 7188.461 6911.539 5775 4950 5175 8250 5850 8925
+ 1 1 1.00 60.00 120.00
+6 2100 75 4425 9225
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 5282.812 1950.000 3825 3450 3225 1575 3825 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 1 1 0 5437.500 6975.000 3825 5700 3450 7500 3825 8250
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 1 1 0 5238.461 6911.539 3825 4950 3225 8250 3900 8925
+ 1 1 1.00 60.00 120.00
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4125 8250 237 237 4125 8250 4200 8475
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4125 8925 237 237 4125 8925 4200 9150
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4050 375 237 237 4050 375 4125 600
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4050 1050 237 237 4050 1050 4125 1275
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7050 3825 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7125 3825 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7200 3825 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6525 3825 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6450 3825 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5700 3825 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5025 3825 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5625 3825 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2850 3825 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4350 3825 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5550 3825 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4950 3825 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3600 3825 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4200 3825 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4800 3825 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3525 3825 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4125 3825 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3450 3825 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2775 3825 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2025 3825 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2700 3825 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 1950 3825 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2175 4275 1725 3825 1725 3825 2175 4275 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2925 4275 2475 3825 2475 3825 2925 4275 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 3675 4275 3225 3825 3225 3825 3675 4275 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 4425 4275 3975 3825 3975 3825 4425 4275 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5175 4275 4725 3825 4725 3825 5175 4275 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5925 4275 5475 3825 5475 3825 5925 4275 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 6675 4275 6225 3825 6225 3825 6675 4275 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 7425 4275 6975 3825 6975 3825 7425 4275 7425
+2 1 0 2 0 0 100 0 3 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 1650 2175 1200
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 1725 4050 1275
+-6
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 8250 237 237 2175 8250 2250 8475
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 8925 237 237 2175 8925 2250 9150
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 300 237 237 2175 300 2250 525
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 975 237 237 2175 975 2250 1200
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6075 8250 237 237 6075 8250 6150 8475
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6075 8925 237 237 6075 8925 6150 9150
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6000 375 237 237 6000 375 6075 600
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6000 1050 237 237 6000 1050 6075 1275
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 7425 2400 6975 1950 6975 1950 7425 2400 7425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 6675 2400 6225 1950 6225 1950 6675 2400 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5925 2400 5475 1950 5475 1950 5925 2400 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5175 2400 4725 1950 4725 1950 5175 2400 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 4425 2400 3975 1950 3975 1950 4425 2400 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 3675 2400 3225 1950 3225 1950 3675 2400 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2175 2400 1725 1950 1725 1950 2175 2400 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2925 2400 2475 1950 2475 1950 2925 2400 2925
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7050 5775 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7125 5775 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7200 5775 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6525 5775 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6450 5775 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5700 5775 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5025 5775 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5625 5775 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2850 5775 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4350 5775 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5550 5775 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4950 5775 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3600 5775 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4200 5775 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4800 5775 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3525 5775 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4125 5775 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3450 5775 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2775 5775 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2025 5775 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2700 5775 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 1950 5775 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2175 6225 1725 5775 1725 5775 2175 6225 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2925 6225 2475 5775 2475 5775 2925 6225 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 3675 6225 3225 5775 3225 5775 3675 6225 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 4425 6225 3975 5775 3975 5775 4425 6225 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5175 6225 4725 5775 4725 5775 5175 6225 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5925 6225 5475 5775 5475 5775 5925 6225 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 6675 6225 6225 5775 6225 5775 6675 6225 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 7425 6225 6975 5775 6975 5775 7425 6225 7425
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 1725 6000 1275
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 2025 A1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 2025 A2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 2025 A3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 2775 B1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 2775 B2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 2775 B3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 3525 C1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 3525 C2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 3525 C3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 4275 D1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 4275 D2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 4275 D3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 5025 E1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 5025 E2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 5025 E3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 5775 F2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 5775 F1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 5775 F3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 6525 G1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3975 6525 G2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 6525 G3\001
+4 0 0 100 0 2 12 0.0000 4 135 240 3900 7275 H2\001
+4 0 0 100 0 2 12 0.0000 4 135 240 5925 7275 H3\001
+4 0 0 100 0 2 12 0.0000 4 135 240 2025 7275 H1\001
+4 0 0 100 0 2 12 0.0000 4 135 360 1950 1050 YA1\001
+4 0 0 100 0 2 12 0.0000 4 135 360 3900 1125 YA2\001
+4 0 0 100 0 2 12 0.0000 4 135 360 5850 1125 YA3\001
+4 0 0 100 0 2 12 0.0000 4 135 345 1950 375 YC1\001
+4 0 0 100 0 2 12 0.0000 4 135 345 3900 450 YC2\001
+4 0 0 100 0 2 12 0.0000 4 135 345 5850 450 YC3\001
+4 0 0 100 0 2 12 0.0000 4 135 345 2025 8325 YE1\001
+4 0 0 100 0 2 12 0.0000 4 135 345 3975 8325 YE2\001
+4 0 0 100 0 2 12 0.0000 4 135 345 5925 8325 YE3\001
+4 0 0 100 0 2 12 0.0000 4 135 345 3975 9000 YF2\001
+4 0 0 100 0 2 12 0.0000 4 135 345 5925 9000 YF3\001
+4 0 0 100 0 2 12 0.0000 4 135 345 2025 9000 YF1\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/water3.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,279 @@
+#FIG 3.2
+Portrait
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 7232.812 1950.000 5775 3450 5175 1575 5775 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 5282.812 1950.000 3825 3450 3225 1575 3825 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 5445.833 6562.500 1950 4200 1275 7200 1950 8925
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 7595.833 6637.500 3825 4275 3150 6825 3825 9000
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 8419.351 6558.096 5700 4200 5100 7950 5775 9000
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 3332.812 2025.000 1875 3525 1275 1650 1875 525
+ 1 1 1.00 60.00 120.00
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 7425 2400 6975 1950 6975 1950 7425 2400 7425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 6675 2400 6225 1950 6225 1950 6675 2400 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5925 2400 5475 1950 5475 1950 5925 2400 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5175 2400 4725 1950 4725 1950 5175 2400 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 4425 2400 3975 1950 3975 1950 4425 2400 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 3675 2400 3225 1950 3225 1950 3675 2400 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2175 2400 1725 1950 1725 1950 2175 2400 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2925 2400 2475 1950 2475 1950 2925 2400 2925
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7050 5775 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7125 5775 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7200 5775 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6525 5775 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6450 5775 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5700 5775 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5025 5775 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5625 5775 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2850 5775 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4350 5775 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5550 5775 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4950 5775 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3600 5775 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4200 5775 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4800 5775 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3525 5775 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4125 5775 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3450 5775 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2775 5775 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2025 5775 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2700 5775 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 1950 5775 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2175 6225 1725 5775 1725 5775 2175 6225 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2925 6225 2475 5775 2475 5775 2925 6225 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 3675 6225 3225 5775 3225 5775 3675 6225 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 4425 6225 3975 5775 3975 5775 4425 6225 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5175 6225 4725 5775 4725 5775 5175 6225 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5925 6225 5475 5775 5475 5775 5925 6225 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 6675 6225 6225 5775 6225 5775 6675 6225 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 7425 6225 6975 5775 6975 5775 7425 6225 7425
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 1725 6000 1275
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7050 3825 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7125 3825 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7200 3825 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6525 3825 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6450 3825 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5700 3825 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5025 3825 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5625 3825 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2850 3825 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4350 3825 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5550 3825 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4950 3825 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3600 3825 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4200 3825 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4800 3825 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3525 3825 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4125 3825 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3450 3825 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2775 3825 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2025 3825 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2700 3825 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 1950 3825 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2175 4275 1725 3825 1725 3825 2175 4275 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2925 4275 2475 3825 2475 3825 2925 4275 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 3675 4275 3225 3825 3225 3825 3675 4275 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 4425 4275 3975 3825 3975 3825 4425 4275 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5175 4275 4725 3825 4725 3825 5175 4275 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5925 4275 5475 3825 5475 3825 5925 4275 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 6675 4275 6225 3825 6225 3825 6675 4275 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 7425 4275 6975 3825 6975 3825 7425 4275 7425
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 1725 4050 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 1275 6225 825 5775 825 5775 1275 6225 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 600 6225 150 5775 150 5775 600 6225 600
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 600 4275 150 3825 150 3825 600 4275 600
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 1275 4275 825 3825 825 3825 1275 4275 1275
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 7425 2175 7950
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 8400 2400 7950 1950 7950 1950 8400 2400 8400
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 8400 4275 7950 3825 7950 3825 8400 4275 8400
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 8400 6225 7950 5775 7950 5775 8400 6225 8400
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 7425 4050 7950
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 7425 6000 7950
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 9150 2400 8700 1950 8700 1950 9150 2400 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 9150 4275 8700 3825 8700 3825 9150 4275 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 9150 6225 8700 5775 8700 5775 9150 6225 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 1200 2400 750 1950 750 1950 1200 2400 1200
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 600 2400 150 1950 150 1950 600 2400 600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 1650 2175 1200
+4 0 0 100 0 2 12 0.0000 4 135 90 2100 2025 1\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2100 2775 2\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2100 3525 3\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2025 4275 4\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2025 5025 5\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2025 5775 6\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2025 6525 7\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2025 7275 8\001
+4 0 0 100 0 2 12 0.0000 4 135 180 2025 9000 11\001
+4 0 0 100 0 2 12 0.0000 4 135 180 2100 8250 12\001
+4 0 0 100 0 2 12 0.0000 4 135 90 2100 1050 9\001
+4 0 0 100 0 2 12 0.0000 4 135 180 2100 450 10\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 2025 13\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 2775 14\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 3525 15\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 4275 16\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 5025 17\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 5775 18\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 6525 19\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 7275 20\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 1125 21\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3975 450 22\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 9000 23\001
+4 0 0 100 0 2 12 0.0000 4 135 180 3900 8250 24\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 2025 25\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 2775 26\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 3525 27\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 4275 28\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 5025 29\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 5775 30\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 6525 31\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 7275 32\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 1125 33\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5925 450 34\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 8250 36\001
+4 0 0 100 0 2 12 0.0000 4 135 180 5850 9000 35\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/water3.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3_75.gif
Binary file toolboxes/FullBNT-1.0.7/docs/Figures/water3_75.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3_circle.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/water3_circle.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,231 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 3229.069 1880.389 1950 3450 1275 1350 1875 375
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 7232.812 1950.000 5775 3450 5175 1575 5775 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 5282.812 1950.000 3825 3450 3225 1575 3825 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 6931.999 6519.041 1875 4275 1425 7050 1950 8925
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 7187.500 6562.500 3825 4275 3150 6075 3825 8850
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 9787.500 6592.500 5775 4275 5175 6150 5700 8775
+ 0 0 1.00 60.00 120.00
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 8925 237 237 2175 8925 2250 9150
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 300 237 237 2175 300 2250 525
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 975 237 237 2175 975 2250 1200
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2100 1950 237 237 2100 1950 2175 2175
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2175 2700 237 237 2175 2700 2250 2925
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2175 3450 237 237 2175 3450 2250 3675
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2100 4200 237 237 2100 4200 2175 4425
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2175 4950 237 237 2175 4950 2250 5175
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2175 5700 237 237 2175 5700 2250 5925
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2175 6450 237 237 2175 6450 2250 6675
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 2175 7125 237 237 2175 7125 2250 7350
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4050 375 237 237 4050 375 4125 600
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4050 1050 237 237 4050 1050 4125 1275
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 1950 237 237 4050 1950 4125 2175
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 2700 237 237 4050 2700 4125 2925
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 3450 237 237 4050 3450 4125 3675
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 4200 237 237 4050 4200 4125 4425
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 4950 237 237 4050 4950 4125 5175
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 5700 237 237 4050 5700 4125 5925
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 6450 237 237 4050 6450 4125 6675
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 4050 7200 237 237 4050 7200 4125 7425
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 1950 237 237 6000 1950 6075 2175
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 2700 237 237 6000 2700 6075 2925
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 4200 237 237 6000 4200 6075 4425
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 4875 237 237 6000 4875 6075 5100
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 5700 237 237 6000 5700 6075 5925
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 6450 237 237 6000 6450 6075 6675
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 7200 237 237 6000 7200 6075 7425
+1 3 0 2 0 7 100 0 -1 0.000 1 0.0000 6000 3450 237 237 6000 3450 6075 3675
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 2175 8250 237 237 2175 8250 2250 8475
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4050 8250 237 237 4050 8250 4125 8475
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 4050 8925 237 237 4050 8925 4125 9150
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6000 8175 237 237 6000 8175 6075 8400
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6000 8850 237 237 6000 8850 6075 9075
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6000 375 237 237 6000 375 6075 600
+1 3 0 2 0 0 100 0 3 0.000 1 0.0000 6000 1050 237 237 6000 1050 6075 1275
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7050 5775 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7125 5775 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7200 5775 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6525 5775 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6450 5775 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5700 5775 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5025 5775 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5625 5775 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2850 5775 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4350 5775 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5550 5775 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4950 5775 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3600 5775 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4200 5775 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4800 5775 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3525 5775 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4125 5775 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3450 5775 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2775 5775 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2025 5775 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2700 5775 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 1950 5775 1950
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 1725 6000 1275
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7050 3825 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7125 3825 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7200 3825 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6525 3825 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6450 3825 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5700 3825 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5025 3825 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5625 3825 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2850 3825 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4350 3825 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5550 3825 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4950 3825 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3600 3825 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4200 3825 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4800 3825 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3525 3825 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4125 3825 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3450 3825 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2775 3825 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2025 3825 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2700 3825 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 1950 3825 1950
+2 1 0 2 0 0 100 0 3 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 1650 2175 1200
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 1725 4050 1275
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 2175 7350 2175 7950
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 4050 7425 4050 8025
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 120.00
+ 6000 7425 6000 7950
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 2025 A1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 2025 A2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 2025 A3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 2775 B1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 2775 B2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 2775 B3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 3525 C1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 3525 C2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 3525 C3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 4275 D1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 4275 D2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 4275 D3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 5025 E1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 5025 E2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 5025 E3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 5775 F2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 5775 F1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 5775 F3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 6525 G1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3975 6525 G2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 6525 G3\001
+4 0 0 100 0 2 12 0.0000 4 135 240 3900 7275 H2\001
+4 0 0 100 0 2 12 0.0000 4 135 240 5925 7275 H3\001
+4 0 0 100 0 2 12 0.0000 4 135 240 2025 7275 H1\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3_named_nodes.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/water3_named_nodes.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,279 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 3229.069 1880.389 1950 3450 1275 1350 1875 375
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 7232.812 1950.000 5775 3450 5175 1575 5775 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 5282.812 1950.000 3825 3450 3225 1575 3825 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 5445.833 6562.500 1950 4200 1275 7200 1950 8925
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 7595.833 6637.500 3825 4275 3150 6825 3825 9000
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 8419.351 6558.096 5700 4200 5100 7950 5775 9000
+ 1 1 1.00 60.00 120.00
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 7425 2400 6975 1950 6975 1950 7425 2400 7425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 6675 2400 6225 1950 6225 1950 6675 2400 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5925 2400 5475 1950 5475 1950 5925 2400 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5175 2400 4725 1950 4725 1950 5175 2400 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 4425 2400 3975 1950 3975 1950 4425 2400 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 3675 2400 3225 1950 3225 1950 3675 2400 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2175 2400 1725 1950 1725 1950 2175 2400 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2925 2400 2475 1950 2475 1950 2925 2400 2925
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7050 5775 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7125 5775 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7200 5775 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6525 5775 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6450 5775 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5700 5775 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5025 5775 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5625 5775 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2850 5775 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4350 5775 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5550 5775 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4950 5775 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3600 5775 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4200 5775 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4800 5775 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3525 5775 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4125 5775 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3450 5775 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2775 5775 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2025 5775 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2700 5775 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 1950 5775 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2175 6225 1725 5775 1725 5775 2175 6225 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2925 6225 2475 5775 2475 5775 2925 6225 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 3675 6225 3225 5775 3225 5775 3675 6225 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 4425 6225 3975 5775 3975 5775 4425 6225 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5175 6225 4725 5775 4725 5775 5175 6225 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5925 6225 5475 5775 5475 5775 5925 6225 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 6675 6225 6225 5775 6225 5775 6675 6225 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 7425 6225 6975 5775 6975 5775 7425 6225 7425
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 1725 6000 1275
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7050 3825 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7125 3825 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7200 3825 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6525 3825 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6450 3825 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5700 3825 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5025 3825 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5625 3825 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2850 3825 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4350 3825 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5550 3825 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4950 3825 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3600 3825 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4200 3825 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4800 3825 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3525 3825 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4125 3825 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3450 3825 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2775 3825 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2025 3825 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2700 3825 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 1950 3825 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2175 4275 1725 3825 1725 3825 2175 4275 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2925 4275 2475 3825 2475 3825 2925 4275 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 3675 4275 3225 3825 3225 3825 3675 4275 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 4425 4275 3975 3825 3975 3825 4425 4275 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5175 4275 4725 3825 4725 3825 5175 4275 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5925 4275 5475 3825 5475 3825 5925 4275 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 6675 4275 6225 3825 6225 3825 6675 4275 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 7425 4275 6975 3825 6975 3825 7425 4275 7425
+2 1 0 2 0 0 100 0 3 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 1650 2175 1200
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 1725 4050 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 1275 6225 825 5775 825 5775 1275 6225 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 600 6225 150 5775 150 5775 600 6225 600
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 600 4275 150 3825 150 3825 600 4275 600
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 1275 4275 825 3825 825 3825 1275 4275 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2325 1200 2325 750 1875 750 1875 1200 2325 1200
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2325 525 2325 75 1875 75 1875 525 2325 525
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 7425 2175 7950
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 8400 2400 7950 1950 7950 1950 8400 2400 8400
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 8400 4275 7950 3825 7950 3825 8400 4275 8400
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 8400 6225 7950 5775 7950 5775 8400 6225 8400
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 7425 4050 7950
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 7425 6000 7950
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 9150 2400 8700 1950 8700 1950 9150 2400 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 9150 4275 8700 3825 8700 3825 9150 4275 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 9150 6225 8700 5775 8700 5775 9150 6225 9150
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 2025 A1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 2025 A2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 2025 A3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 2775 B1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 2775 B2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 2775 B3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 3525 C1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 3525 C2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 3525 C3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 4275 D1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3900 4275 D2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 4275 D3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 5025 E1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 5025 E2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 5025 E3\001
+4 0 0 100 0 2 12 0.0000 4 135 210 3900 5775 F2\001
+4 0 0 100 0 2 12 0.0000 4 135 210 2025 5775 F1\001
+4 0 0 100 0 2 12 0.0000 4 135 210 5850 5775 F3\001
+4 0 0 100 0 2 12 0.0000 4 135 225 2025 6525 G1\001
+4 0 0 100 0 2 12 0.0000 4 135 225 3975 6525 G2\001
+4 0 0 100 0 2 12 0.0000 4 135 225 5850 6525 G3\001
+4 0 0 100 0 2 12 0.0000 4 135 240 3900 7275 H2\001
+4 0 0 100 0 2 12 0.0000 4 135 240 5925 7275 H3\001
+4 0 0 100 0 2 12 0.0000 4 135 240 2025 7275 H1\001
+4 0 0 100 0 2 12 0.0000 4 135 360 1950 1050 YA1\001
+4 0 0 100 0 2 12 0.0000 4 135 360 3900 1125 YA2\001
+4 0 0 100 0 2 12 0.0000 4 135 360 5850 1125 YA3\001
+4 0 0 100 0 2 12 0.0000 4 135 345 1950 375 YC1\001
+4 0 0 100 0 2 12 0.0000 4 135 345 3900 450 YC2\001
+4 0 0 100 0 2 12 0.0000 4 135 345 5850 450 YC3\001
+4 0 0 100 0 2 12 0.0000 4 135 375 2025 8250 YH1\001
+4 0 0 100 0 2 12 0.0000 4 135 375 3900 8250 YH2\001
+4 0 0 100 0 2 12 0.0000 4 135 375 5850 8250 YH3\001
+4 0 0 100 0 2 12 0.0000 4 135 360 2025 9000 YD1\001
+4 0 0 100 0 2 12 0.0000 4 135 360 3900 9000 YD2\001
+4 0 0 100 0 2 12 0.0000 4 135 360 5850 9000 YD3\001
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Figures/water3_nolabels.fig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/Figures/water3_nolabels.fig Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,243 @@
+#FIG 3.2
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 7232.812 1950.000 5775 3450 5175 1575 5775 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 5282.812 1950.000 3825 3450 3225 1575 3825 450
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 5445.833 6562.500 1950 4200 1275 7200 1950 8925
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 7595.833 6637.500 3825 4275 3150 6825 3825 9000
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 7 100 0 -1 0.000 0 1 1 0 8419.351 6558.096 5700 4200 5100 7950 5775 9000
+ 1 1 1.00 60.00 120.00
+5 1 0 2 0 0 100 0 -1 0.000 0 0 1 0 3332.812 2025.000 1875 3525 1275 1650 1875 525
+ 1 1 1.00 60.00 120.00
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 7425 2400 6975 1950 6975 1950 7425 2400 7425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 6675 2400 6225 1950 6225 1950 6675 2400 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5925 2400 5475 1950 5475 1950 5925 2400 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 5175 2400 4725 1950 4725 1950 5175 2400 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 4425 2400 3975 1950 3975 1950 4425 2400 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 3675 2400 3225 1950 3225 1950 3675 2400 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2175 2400 1725 1950 1725 1950 2175 2400 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 2400 2925 2400 2475 1950 2475 1950 2925 2400 2925
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7050 5775 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7125 5775 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 7200 5775 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6525 5775 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 6450 5775 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5700 5775 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5025 5775 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5625 5775 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2850 5775 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4350 5775 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 5550 5775 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4950 5775 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3600 5775 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4200 5775 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4800 5775 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3525 5775 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 4125 5775 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 3450 5775 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2775 5775 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2025 5775 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 2700 5775 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 4350 1950 5775 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2175 6225 1725 5775 1725 5775 2175 6225 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 2925 6225 2475 5775 2475 5775 2925 6225 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 3675 6225 3225 5775 3225 5775 3675 6225 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 4425 6225 3975 5775 3975 5775 4425 6225 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5175 6225 4725 5775 4725 5775 5175 6225 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 5925 6225 5475 5775 5475 5775 5925 6225 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 6675 6225 6225 5775 6225 5775 6675 6225 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 6225 7425 6225 6975 5775 6975 5775 7425 6225 7425
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 1725 6000 1275
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7050 3825 5850
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7125 3825 6525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 7200 3825 7200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6525 3825 7125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 6450 3825 6450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5700 3825 5700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5025 3825 5625
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5625 3825 5025
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2850 3825 6300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4350 3825 5550
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 5550 3825 4350
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4950 3825 4950
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3600 3825 4800
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4200 3825 4200
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4800 3825 3600
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3525 3825 4125
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 4125 3825 3525
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 3450 3825 3450
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2775 3825 3375
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2025 3825 3300
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 2700 3825 2700
+2 1 0 3 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2
+ 3 1 1.00 60.00 120.00
+ 2400 1950 3825 1950
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2175 4275 1725 3825 1725 3825 2175 4275 2175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 2925 4275 2475 3825 2475 3825 2925 4275 2925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 3675 4275 3225 3825 3225 3825 3675 4275 3675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 4425 4275 3975 3825 3975 3825 4425 4275 4425
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5175 4275 4725 3825 4725 3825 5175 4275 5175
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 5925 4275 5475 3825 5475 3825 5925 4275 5925
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 6675 4275 6225 3825 6225 3825 6675 4275 6675
+2 2 0 3 -1 -1 0 0 -1 0.000 0 0 7 0 0 5
+ 4275 7425 4275 6975 3825 6975 3825 7425 4275 7425
+2 1 0 2 0 0 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 1725 4050 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 1275 6225 825 5775 825 5775 1275 6225 1275
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 600 6225 150 5775 150 5775 600 6225 600
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 600 4275 150 3825 150 3825 600 4275 600
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 1275 4275 825 3825 825 3825 1275 4275 1275
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 7425 2175 7950
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 8400 2400 7950 1950 7950 1950 8400 2400 8400
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 8400 4275 7950 3825 7950 3825 8400 4275 8400
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 8400 6225 7950 5775 7950 5775 8400 6225 8400
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4050 7425 4050 7950
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6000 7425 6000 7950
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 9150 2400 8700 1950 8700 1950 9150 2400 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 4275 9150 4275 8700 3825 8700 3825 9150 4275 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 6225 9150 6225 8700 5775 8700 5775 9150 6225 9150
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 1200 2400 750 1950 750 1950 1200 2400 1200
+2 2 0 2 0 0 100 0 3 0.000 0 0 7 0 0 5
+ 2400 600 2400 150 1950 150 1950 600 2400 600
+2 1 0 2 0 7 100 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2175 1650 2175 1200
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/GR03~1.PDF
Binary file toolboxes/FullBNT-1.0.7/docs/GR03~1.PDF has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Talks/BNT_mathworks.ppt
Binary file toolboxes/FullBNT-1.0.7/docs/Talks/BNT_mathworks.ppt has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Talks/gR03.ppt
Binary file toolboxes/FullBNT-1.0.7/docs/Talks/gR03.ppt has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/Talks/stair_BNT_mathworks.ppt
Binary file toolboxes/FullBNT-1.0.7/docs/Talks/stair_BNT_mathworks.ppt has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/adj2pajek2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/adj2pajek2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,86 @@
+% ADJ2PAJEK2 Converts an adjacency matrix representation to a Pajek .net read format
+% adj2pajek2(adj, filename-stem, 'argname1', argval1, ...)
+%
+% Set A(i,j)=-1 to get a dotted line
+%
+% Optional arguments
+%
+% nodeNames - cell array, defaults to {'v1','v2,...}
+% shapes - cell array, defaults to {'ellipse','ellipse',...}
+% Choices are 'ellipse', 'box', 'diamond', 'triangle', 'cross', 'empty'
+% partition - vector of integers, defaults to [1 1 ... 1]
+% This will automatically color-code the vertices by their partition
+%
+% Run pajek (available from http://vlado.fmf.uni-lj.si/pub/networks/pajek/)
+% Choose File->Network->Read from the menu
+% Then press ctrl-G (Draw->Draw)
+% Optional: additionally load the partition file then press ctrl-P (Draw->partition)
+%
+% Examples
+% A=zeros(5,5);A(1,2)=-1;A(2,1)=-1;A(1,[3 4])=1;A(2,5)=1;
+% adj2pajek2(A,'foo') % makes foo.net
+%
+% adj2pajek2(A,'foo','partition',[1 1 2 2 2]) % makes foo.net and foo.clu
+%
+% adj2pajek2(A,'foo',...
+% 'nodeNames',{'TF1','TF2','G1','G2','G3'},...
+% 'shapes',{'box','box','ellipse','ellipse','ellipse'});
+%
+%
+% The file format is documented on p68 of the pajek manual
+% and good examples are on p58, p72
+%
+% Written by Kevin Murphy, 30 May 2007
+% Based on adj2pajek by Gergana Bounova
+% http://stuff.mit.edu/people/gerganaa/www/matlab/routines.html
+% Fixes a small bug (opens files as 'wt' instead of 'w' so it works in windows)
+% Also, simplified her code and added some features.
+
+function []=adj2pajek2(adj,filename, varargin)
+
+N = length(adj);
+for i=1:N
+ nodeNames{i} = strcat('"v',num2str(i),'"');
+ shapes{i} = 'ellipse';
+end
+
+[nodeNames, shapes, partition] = process_options(varargin, ...
+ 'nodeNames', nodeNames, 'shapes', shapes, 'partition', []);
+
+if ~isempty(partition)
+ fid = fopen(sprintf('%s.clu', filename),'wt','native');
+ fprintf(fid,'*Vertices %6i\n',N);
+ for i=1:N
+ fprintf(fid, '%d\n', partition(i));
+ end
+ fclose(fid);
+end
+
+fid = fopen(sprintf('%s.net', filename),'wt','native');
+
+fprintf(fid,'*Vertices %6i\n',N);
+for i=1:N
+ fprintf(fid,'%3i %s %s\n', i, nodeNames{i}, shapes{i});
+end
+
+%fprintf(fid,'*Edges\n');
+fprintf(fid,'*Arcs\n'); % directed
+for i=1:N
+ for j=1:N
+ if adj(i,j) ~= 0
+ fprintf(fid,' %4i %4i %2i\n',i,j,adj(i,j));
+ end
+ end
+end
+fclose(fid)
+
+
+if 0
+adj2pajek2(A,'foo',...
+ 'nodeNames',{'TF1','TF2','G1','G2','G3'},...
+ 'shapes',{'box','box','ellipse','ellipse','ellipse'});
+
+N = 100; part = ones(1,N); part(intersect(reg.tfidxTest,1:N))=2;
+G = reg.Atest(1:N, 1:N)';
+adj2pajek2(G, 'Ecoli100', 'partition', part)
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/bnsoftOld.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/bnsoftOld.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1078 @@
+
+Software Packages for Graphical Models / Bayesian Networks
+
+
+
+
+
+Software Packages for Graphical Models / Bayesian Networks
+
+
+Written by Kevin Murphy.
+
+Last updated 31 October 2005.
+
+
Remarks
+
+
+
+A much more detailed comparison of some of these software packages is
+available from Appendix B of
+Bayesian AI, by
+Ann Nicholson and Kevin Korb.
+This appendix is
+available
+here,
+and is based on the online comparison below.
+
+
+An online French
+version of this page is also available (not necessarily up-to-date).
+
+
+
+
+
+
+
What do the headers in the table mean?
+
+
+
Src = source code included? (N=no) If so, what language?
+
+
API = application program interface included?
+(N means the program cannot be integrated into your code, i.e., it
+must be run as a standalone executable.)
+
+
Exec = Executable runs on W = Windows (95/98/NT), U = Unix, M =
+Mac, or - = any machine with a compiler.
+
+
Cts = are continuous (latent) nodes supported?
+G = (conditionally) Gaussians nodes supported analytically,
+Cs = continuous nodes supported by sampling,
+Cd = continuous nodes supported by discretization,
+Cx = continuous nodes supported by some unspecified method,
+D = only discrete nodes supported.
+
+
+
GUI = Graphical User Interface included?
+
+
Learns parameters?
+
+
Learns structure? CI = means uses conditional independency tests
+
+
+
+
Free?
+0 = free (although possibly only for academic use).
+$ = commercial software (although most have free versions
+which are restricted in
+various ways, e.g., the model size is limited, or models cannot be
+saved, or there is no API.)
+
+
+
Undir?
+What kind of graphs are supported?
+U = only undirected graphs,
+D = only directed graphs,
+UD = both undirected and directed,
+CG = chain graphs (mixed directed/undirected).
+
+
+
Inference = which inference algorithm is used?
+jtree = junction tree,
+varelim = variable (bucket) elimination,
+MH = Metropols Hastings,
+G = Gibbs sampling,
+IS = importance sampling,
+sampling = some other Monte Carlo method,
+polytree = Pearl's algorithm restricted to a graph with no cycles,
+none = no inference supported (hence the program is only designed for
+structure learning from completely observed data)
+
+
Comments.
+If in "quotes", I am quoting the authors at their request.
+
+
BNT supports many types of
+conditional probability distributions (nodes),
+and it is easy to add more.
+
+
Tabular (multinomial)
+
Gaussian
+
Softmax (logistic/ sigmoid)
+
Multi-layer perceptron (neural network)
+
Noisy-or
+
Deterministic
+
+
+
+
BNT supports decision and utility nodes, as well as chance
+nodes,
+i.e., influence diagrams as well as Bayes nets.
+
+
+
BNT supports static and dynamic BNs (useful for modelling dynamical systems
+and sequence data).
+
+
+
BNT supports many different inference algorithms,
+and it is easy to add more.
+
+
+
Exact inference for static BNs:
+
+
junction tree
+
variable elimination
+
brute force enumeration (for discrete nets)
+
linear algebra (for Gaussian nets)
+
Pearl's algorithm (for polytrees)
+
quickscore (for QMR)
+
+
+
+
Approximate inference for static BNs:
+
+
likelihood weighting
+
Gibbs sampling
+
loopy belief propagation
+
+
+
+
Exact inference for DBNs:
+
+
junction tree
+
frontier algorithm
+
forwards-backwards (for HMMs)
+
Kalman-RTS (for LDSs)
+
+
+
+
Approximate inference for DBNs:
+
+
Boyen-Koller
+
factored-frontier/loopy belief propagation
+
+
+
+
+
+
+BNT supports several methods for parameter learning,
+and it is easy to add more.
+
+
+
Batch MLE/MAP parameter learning using EM.
+(Each node type has its own M method, e.g. softmax nodes use IRLS,
+and each inference engine has its own E method, so the code is fully modular.)
+
+
+I was hoping for a Linux-style effect, whereby people would contribute
+their own Matlab code so that the package would grow. With a few
+exceptions, this has not happened,
+although several people have provided bug-fixes (see the acknowledgements).
+Perhaps the Open
+Bayes Project will be more
+succesful in this regard, although the evidence to date is not promising.
+
+
+
+Knowing that someone else might read your code forces one to
+document it properly, a good practice in any case, as anyone knows who
+has revisited old code.
+In addition, by having many "eye balls", it is easier to spot bugs.
+
+
+
+
+I believe in the concept of
+
+reproducible research.
+Good science requires that other people be able
+to replicate your experiments.
+Often a paper does not give enough details about how exactly an
+algorithm was implemented (e.g., how were the parameters chosen? what
+initial conditions were used?), and these can make a big difference in
+practice.
+Hence one should release the code that
+was actually used to generate the results in one's paper.
+This also prevents re-inventing the wheel.
+
+
+
+I was fed up with reading papers where all people do is figure out how
+to do exact inference and/or learning
+in a model which is just a trivial special case of a general Bayes net, e.g.,
+input-output HMMs, coupled-HMMs, auto-regressive HMMs.
+My hope is that, by releasing general purpose software, the field can
+move on to more interesting questions.
+As Alfred North Whitehead said in 1911,
+"Civilization advances by extending the number of important operations
+that we can do without thinking about them."
+
+
+
+Matlab is an interactive, matrix-oriented programming language that
+enables one to express one's (mathematical) ideas very concisely and directly,
+without having to worry about annoying details like memory allocation
+or type checking. This considerably reduces development time and
+keeps code short, readable and fully portable.
+Matlab has excellent built-in support for many data analysis and
+visualization routines. In addition, there are many useful toolboxes, e.g., for
+neural networks, signal and image processing.
+The main disadvantages of Matlab are that it can be slow (which is why
+we are currently rewriting parts of BNT in C), and that the commercial
+license is expensive (although the student version is only $100 in the US).
+
+Many people ask me why I did not use
+Octave,
+an open-source Matlab clone.
+The reason is that
+Octave does not support multi-dimensional arrays,
+cell arrays, objects, etc.
+
+Click here for a more detailed
+comparison of matlab and other languages.
+
+
+
+
+
+I would like to thank numerous people for bug fixes, including:
+Rainer Deventer, Michael Robert James, Philippe Leray, Pedrito Maynard-Reid II, Andrew Ng,
+Ron Parr, Ilya Shpitser, Xuejing Sun, Ursula Sondhauss.
+
+I would like to thank the following people for contributing code:
+Pierpaolo Brutti, Ali Taylan Cemgil, Tamar Kushnir,
+Tom Murray,
+Nicholas Saunier,
+Ken Shan,
+Yair Weiss,
+Bob Welch,
+Ron Zohar.
+
+The following Intel employees have also contributed code:
+Qian Diao, Shan Huang, Yimin Zhang and especially Wei Hu.
+
+
+I would like to thank Stuart Russell for funding me over the years as
+I developed BNT, and Gary Bradksi for hiring me as an intern at Intel,
+which has supported much of the recent developments of BNT.
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/bnt_download.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/bnt_download.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+
BNT supports many types of
+conditional probability distributions (nodes),
+and it is easy to add more.
+
+
Tabular (multinomial)
+
Gaussian
+
Softmax (logistic/ sigmoid)
+
Multi-layer perceptron (neural network)
+
Noisy-or
+
Deterministic
+
+
+
+
BNT supports decision and utility nodes, as well as chance
+nodes,
+i.e., influence diagrams as well as Bayes nets.
+
+
+
BNT supports static and dynamic BNs (useful for modelling dynamical systems
+and sequence data).
+
+
+
BNT supports many different inference algorithms,
+and it is easy to add more.
+
+
+
Exact inference for static BNs:
+
+
junction tree
+
variable elimination
+
brute force enumeration (for discrete nets)
+
linear algebra (for Gaussian nets)
+
Pearl's algorithm (for polytrees)
+
quickscore (for QMR)
+
+
+
+
Approximate inference for static BNs:
+
+
likelihood weighting
+
Gibbs sampling
+
loopy belief propagation
+
+
+
+
Exact inference for DBNs:
+
+
junction tree
+
frontier algorithm
+
forwards-backwards (for HMMs)
+
Kalman-RTS (for LDSs)
+
+
+
+
Approximate inference for DBNs:
+
+
Boyen-Koller
+
factored-frontier/loopy belief propagation
+
+
+
+
+
+
+BNT supports several methods for parameter learning,
+and it is easy to add more.
+
+
+
Batch MLE/MAP parameter learning using EM.
+(Each node type has its own M method, e.g. softmax nodes use IRLS,
+and each inference engine has its own E method, so the code is fully modular.)
+
+
+I was hoping for a Linux-style effect, whereby people would contribute
+their own Matlab code so that the package would grow. With a few
+exceptions, this has not happened,
+although several people have provided bug-fixes (see the acknowledgements).
+Perhaps the Open
+Bayes Project will be more
+succesful in this regard, although the evidence to date is not promising.
+
+
+
+Knowing that someone else might read your code forces one to
+document it properly, a good practice in any case, as anyone knows who
+has revisited old code.
+In addition, by having many "eye balls", it is easier to spot bugs.
+
+
+
+
+I believe in the concept of
+
+reproducible research.
+Good science requires that other people be able
+to replicate your experiments.
+Often a paper does not give enough details about how exactly an
+algorithm was implemented (e.g., how were the parameters chosen? what
+initial conditions were used?), and these can make a big difference in
+practice.
+Hence one should release the code that
+was actually used to generate the results in one's paper.
+This also prevents re-inventing the wheel.
+
+
+
+I was fed up with reading papers where all people do is figure out how
+to do exact inference and/or learning
+in a model which is just a trivial special case of a general Bayes net, e.g.,
+input-output HMMs, coupled-HMMs, auto-regressive HMMs.
+My hope is that, by releasing general purpose software, the field can
+move on to more interesting questions.
+As Alfred North Whitehead said in 1911,
+"Civilization advances by extending the number of important operations
+that we can do without thinking about them."
+
+
+
+Matlab is an interactive, matrix-oriented programming language that
+enables one to express one's (mathematical) ideas very concisely and directly,
+without having to worry about annoying details like memory allocation
+or type checking. This considerably reduces development time and
+keeps code short, readable and fully portable.
+Matlab has excellent built-in support for many data analysis and
+visualization routines. In addition, there are many useful toolboxes, e.g., for
+neural networks, signal and image processing.
+The main disadvantages of Matlab are that it can be slow (which is why
+we are currently rewriting parts of BNT in C), and that the commercial
+license is expensive (although the student version is only $100 in the US).
+
+Many people ask me why I did not use
+Octave,
+an open-source Matlab clone.
+The reason is that
+Octave does not support multi-dimensional arrays,
+cell arrays, objects, etc.
+
+Click here for a more detailed
+comparison of matlab and other languages.
+
+
+
+
+
+I would like to thank numerous people for bug fixes, including:
+Rainer Deventer, Michael Robert James, Philippe Leray, Pedrito Maynard-Reid II, Andrew Ng,
+Ron Parr, Ilya Shpitser, Xuejing Sun, Ursula Sondhauss.
+
+I would like to thank the following people for contributing code:
+Pierpaolo Brutti, Ali Taylan Cemgil, Tamar Kushnir, Ken Shan,
+Yair Weiss,
+Ron Zohar.
+
+The following Intel employees have also contributed code:
+Qian Diao, Shan Huang, Yimin Zhang and especially Wei Hu.
+
+
+I would like to thank Stuart Russell for funding me over the years as
+I developed BNT, and Gary Bradksi for hiring me as an intern at Intel,
+which has supported much of the recent developments of BNT.
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/cellarray.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/cellarray.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+Cell arrays are a little tricky in Matlab.
+Consider this example.
+
+C=num2cell(rand(2,3))
+C =
+ [0.4565] [0.8214] [0.6154]
+ [0.0185] [0.4447] [0.7919]
+
+C{1,2} % this is the contents of this cell (could be a vector or a string)
+ans =
+ 0.8214
+
+C{1:2,2} % this is the contents of these cells - returns multiple
+answers!
+ans =
+ 0.8214
+ans =
+ 0.4447
+
+A = C(1:2,2) % this is a slice of the cell array
+ans =
+ [0.8214]
+ [0.4447]
+
+A{1} % A is itself a cell array
+ans =
+ 0.8214
+
+
+
+>> C(1:2,2)=0 % can't assign a scalar to a cell array
+C(1:2,2)=0
+??? Conversion to cell from double is not possible.
+
+
+>> C(1:2,2)={0;0} % can assign a cell array to a cell array
+C(1:2,2)={0;0}
+C =
+ [0.4565] [0] [0.6154]
+ [0.0185] [0] [0.7919]
+
+
+BTW, I use cell arrays for evidence for 2 reasons:
+
+1. [] indicates missing values
+2. it can easily represent vector-valued nodes
+
+The following example makes this clear
+
+C{1,1} = []
+C{2,1} = rand(3,1)
+
+C =
+ [] [0] [0.6154]
+ [3x1 double] [0] [0.7919]
+
+
+Hope this helps,
+Kevin
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/changelog.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/changelog.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1617 @@
+
History of changes to BNT
+
History of changes to BNT
+
+
+
Changes since 4 Oct 2007
+
+
+- 19 Oct 07 murphyk
+
+* BNT\CPDs\@noisyor_CPD\CPD_to_CPT.m: 2nd half of the file is a repeat
+of the first half and was deleted (thanks to Karl Kuschner)
+
+* KPMtools\myismember.m should return logical for use in "assert" so add line at end
+ p=logical(p); this prevents "assert" from failing on an integer input.
+(thanks to Karl Kuschner)
+
+
+
+- 17 Oct 07 murphyk
+
+* Updated subv2ind and ind2subv in KPMtools to Tom Minka's implementation.
+His ind2subv is faster (vectorized), but I had to modify it so it
+matched the behavior of my version when called with siz=[].
+His subv2inv is slightly simpler than mine because he does not treat
+the siz=[2 2 ... 2] case separately.
+Note: there is now no need to ever use the C versions of these
+functions (or any others, for that matter).
+
+* removed BNT/add_BNT_to_path since no longer needed.
+
+
+
+- 4 Oct 07 murphyk
+
+* moved code from sourceforge to UBC website, made version 1.0.4
+
+* @pearl_inf_engine/pearl_inf_engine line 24, default
+argument for protocol changed from [] to 'parallel'.
+Also, changed private/parallel_protocol so it doesn't write to an
+empty file id (Matlab 7 issue)
+
+* added foptions (Matlab 7 issue)
+
+* changed genpathKPM to exclude svn. Put it in toplevel directory to
+massively simplify the installation process.
+
+
+
+
+
Sourceforge changelog
+
+BNT was first ported to sourceforge on 28 July 2001 by yozhik.
+BNT was removed from sourceforge on 4 October 2007 by Kevin Murphy;
+that version is cached as FullBNT-1.0.3.zip.
+See Changelog from
+sourceforge for a history of that version of the code,
+which formed the basis of the branch currently on Murphy's web page.
+
+
+
Changes from August 1998 -- July 2004
+
+Kevin Murphy made the following changes to his own private copy.
+(Other small changes were made between July 2004 and October 2007, but were
+not documented.)
+These may or may not be reflected in the sourceforge version of the
+code (which was independently maintained).
+
+
+
+
9 June 2004
+
+
Changed tabular_CPD/learn_params back to old syntax, to make it
+compatible with gaussian_CPD/learn_params (and re-enabled
+generic_CPD/learn_params).
+Modified learning/learn_params.m and learning/score_family
+appropriately.
+(In particular, I undid the change Sonia Leach had to make to
+score_family to handle this asymmetry.)
+Added examples/static/gaussian2 to test this new functionality.
+
+
Added bp_mrf2 (for generic pairwise MRFs) to
+inference/static/@bp_belprop_mrf2_inf_engine. [MRFs are not
+"officially" supported in BNT, so this code is just for expert
+hackers.]
+
+
Added examples/static/nodeorderExample.m to illustrate importance
+of using topological ordering.
+
+
Ran dos2unix on all *.c files within BNT to eliminate compiler
+warnings.
+
+
+
+
7 June 2004
+
+
Replaced normaliseC with normalise in HMM/fwdback, for maximum
+portability (and negligible loss in speed).
+
Ensured FullBNT versions of HMM, KPMstats etc were as up-to-date
+as stand-alone versions.
+
Changed add_BNT_to_path so it no longer uses addpath(genpath()),
+which caused Old versions of files to mask new ones.
+
+
+
18 February 2004
+
+
A few small bug fixes to BNT, as posted to the Yahoo group.
+
Several new functions added to KPMtools, KPMstats and Graphviz
+(none needed by BNT).
+
Added CVS to some of my toolboxes.
+
+
+
30 July 2003
+
+
qian.diao fixed @mpot/set_domain_pot and @cgpot/set_domain_pot
+
Marco Grzegorczyk found, and Sonia Leach fixed, a bug in
+do_removal inside learn_struct_mcmc
+
+
+
+
28 July 2003
+
+
Sebastian Luehr provided 2 minor bug fixes, to HMM/fwdback (if any(scale==0))
+and FullBNT\HMM\CPDs\@hhmmQ_CPD\update_ess.m (wrong transpose).
+
+
+
8 July 2003
+
+
Removed buggy BNT/examples/static/MRF2/Old/mk_2D_lattice.m which was
+masking correct graph/mk_2D_lattice.
+
Fixed bug in graph/mk_2D_lattice_slow in the non-wrap-around case
+(line 78)
+
+
+
+
2 July 2003
+
+
Sped up normalize(., 1) in KPMtools by avoiding general repmat
+
Added assign_cols and marginalize_table to KPMtools
+
+
+
+
29 May 2003
+
+
Modified KPMstats/mixgauss_Mstep so it repmats Sigma in the tied
+covariance case (bug found by galt@media.mit.edu).
+
+
Bob Welch found bug in gaussian_CPDs/maximize_params in the way
+cpsz was computed.
+
+
Added KPMstats/mixgauss_em, because my code is easier to
+understand/modify than netlab's (at least for me!).
+
+
Modified BNT/examples/dynamic/viterbi1 to call multinomial_prob
+instead of mk_dhmm_obs_lik.
+
+
Moved parzen window and partitioned models code to KPMstats.
+
+
Rainer Deventer fixed some bugs in his scgpot code, as follows:
+1. complement_pot.m
+Problems occurred for probabilities equal to zero. The result is a
+division by zero error.
+
+2. normalize_pot.m
+This function is used during the calculation of the log-likelihood.
+For a probability of zero a warning "log of zero" occurs. I have not
+really fixed the bug. As a workaround I suggest calculating the
+likelihood based on realmin (the smallest real number) instead of
+zero.
+
+3. recursive_combine_pots
+At the beginning of the function there was no test for the trivial case,
+which defines the combination of two potentials as equal to the direct
+combination. The result might be an infinite recursion which leads to
+a stack overflow in matlab.
+
+
+
+
+
11 May 2003
+
+
Fixed bug in gaussian_CPD/maximize_params so it is compatible
+with the new clg_Mstep routine
+
Modified KPMstats/cwr_em to handle single cluster case
+separately.
+
Fixed bug in netlab/gmminit.
+
Added hash tables to KPMtools.
+
+
+
+
4 May 2003
+
+
+Renamed many functions in KPMstats so the name of the
+distribution/model type comes first,
+Mstep_clg -> clg_Mstep,
+Mstep_cond_gauss -> mixgauss_Mstep.
+Also, renamed eval_pdf_xxx functions to xxx_prob, e.g.
+eval_pdf_cond_mixgauss -> mixgauss_prob.
+This is simpler and shorter.
+
+
+Renamed many functions in HMM toolbox so the name of the
+distribution/model type comes first,
+log_lik_mhmm -> mhmm_logprob, etc.
+mk_arhmm_obs_lik has finally been re-implemented in terms of clg_prob
+and mixgauss_prob (for slice 1).
+Removed the Demos directory, and put them in the main directory.
+This code is not backwards compatible.
+
+
Removed some of the my_xxx functions from KPMstats (these were
+mostly copies of functions from the Mathworks stats toolbox).
+
+
+
Modified BNT to take into account changes to KPMstats and
+HMM toolboxes.
+
+
Fixed KPMstats/Mstep_clg (now called clg_Mstep) for spherical Gaussian case.
+(Trace was wrongly parenthesised, and I used YY instead of YTY.
+The spherical case now gives the same result as the full case
+for cwr_demo.)
+Also, mixgauss_Mstep now adds 0.01 to the ML estimate of Sigma,
+to act as a regularizer (it used to add 0.01 to E[YY'], but this was
+ignored in the spherical case).
+
+
Added cluster weighted regression to KPMstats.
+
+
Added KPMtools/strmatch_substr.
+
+
+
+
+
28 Mar 03
+
+
Added mc_stat_distrib and eval_pdf_cond_prod_parzen to KPMstats
+
Fixed GraphViz/arrow.m incompatibility with matlab 6.5
+(replace all NaN's with 0).
+Modified GraphViz/graph_to_dot so it also works on windows.
+
I removed dag_to_jtree and added graph_to_jtree to the graph
+toolbox; the latter expects an undirected graph as input.
+
I added triangulate_2Dlattice_demo.m to graph.
+
Rainer Deventer fixed the stable conditional Gaussian potential
+classes (scgpot and scgcpot) and inference engine
+(stab_cond_gauss_inf_engine).
+
Rainer Deventer added (stable) higher-order Markov models (see
+inference/dynamic/@stable_ho_inf_engine).
+
+
+
+
14 Feb 03
+
+
Simplified learning/learn_params so it no longer returns BIC
+score. Also, simplified @tabular_CPD/learn_params so it only takes
+local evidence.
+Added learn_params_dbn, which does ML estimation of fully observed
+DBNs.
+
Vectorized KPMstats/eval_pdf_cond_mixgauss for tied Sigma
+case (much faster!).
+Also, now works in log-domain to prevent underflow.
+eval_pdf_mixgauss now calls eval_pdf_cond_mixgauss and inherits these benefits.
+
add_BNT_to_path now calls genpath with 2 arguments if using
+matlab version 5.
+
+
+
+
30 Jan 03
+
+
Vectorized KPMstats/eval_pdf_cond_mixgauss for scalar Sigma
+case (much faster!)
+
Renamed mk_dotfile_from_hmm to draw_hmm and moved it to the
+GraphViz library.
+
Rewrote @gaussian_CPD/maximize_params.m so it calls
+KPMstats/Mstep_clg.
+This fixes bug when using clamped means (found by Rainer Deventer
+and Victor Eruhimov)
+and a bug when using a Wishart prior (no gamma term in the denominator).
+It is also easier to read.
+I rewrote the technical report re-deriving all the equations in a
+clearer notation, making the solution to the bugs more obvious.
+(See www.ai.mit.edu/~murphyk/Papers/learncg.pdf)
+Modified Mstep_cond_gauss to handle priors.
+
Fixed bug reported by Ramgopal Mettu in which add_BNT_to_path
+calls genpath with only 1 argument, whereas version 5 requires 2.
+
Fixed installC and uninstallC to search in FullBNT/BNT.
+
+
+
+
24 Jan 03
+
+
Major simplification of HMM code.
+The API is not backwards compatible.
+No new functionality has been added, however.
+There is now only one fwdback function, instead of 7;
+different behaviors are controlled through optional arguments.
+I renamed 'evaluate observation likelihood' (local evidence)
+to 'evaluate conditional pdf', since this is more general.
+i.e., renamed
+mk_dhmm_obs_lik to eval_pdf_cond_multinomial,
+mk_ghmm_obs_lik to eval_pdf_cond_gauss,
+mk_mhmm_obs_lik to eval_pdf_cond_mog.
+These functions have been moved to KPMstats,
+so they can be used by other toolboxes.
+ghmm's have been eliminated, since they are just a special case of
+mhmm's with M=1 mixture component.
+mixgauss HMMs can now handle a different number of
+mixture components per state.
+init_mhmm has been eliminated, and replaced with init_cond_mixgauss
+(in KPMstats) and mk_leftright/rightleft_transmat.
+learn_dhmm can no longer handle inputs (although this is easy to add back).
+
+
+
+
+
+
+
20 Jan 03
+
+
Added arrow.m to GraphViz directory, and commented out line 922,
+in response to a bug report.
+
+
+
18 Jan 03
+
+
Major restructuring of BNT file structure:
+all code that is not specific to Bayes nets has been removed;
+these packages must be downloaded separately. (Or just download FullBNT.)
+This makes it easier to ensure different toolboxes are consistent.
+misc has been slimmed down and renamed KPMtools, so it can be shared by other toolboxes,
+such as HMM and Kalman; some of the code has been moved to BNT/general.
+The Graphics directory has been slimmed down and renamed GraphViz.
+The graph directory now has no dependence on BNT (dag_to_jtree has
+been renamed graph_to_jtree and has a new API).
+netlab2 no longer contains any netlab files, only netlab extensions.
+None of the functionality has changed.
+
+
+
+
+
11 Jan 03
+
+
jtree_dbn_inf_engine can now support soft evidence.
+
+
Rewrote graph/dfs to make it clearer.
+Return arguments have changed, as has mk_rooted_tree.
+The acyclicity check for large undirected graphs can cause a stack overflow.
+It turns out that this was not a bug, but is because Matlab's stack depth
+bound is very low by default.
+
+
Renamed examples/dynamic/filter2 to filter_test1, so it does not
+conflict with the filter2 function in the image processing toolbox.
+
+
Ran test_BNT on various versions of matlab to check compatibility.
+On matlab 6.5 (r13), elapsed time = 211s, cpu time = 204s.
+On matlab 6.1 (r12), elapsed time = 173s, cpu time = 164s.
+On matlab 5.3 (r11), elapsed time = 116s, cpu time = 114s.
+So matlab is apparently getting slower with time!!
+(All results were with a linux PIII machine.)
+
+
+
+
14 Nov 02
+
+
Removed all ndx inference routines, since they are only
+marginally faster on toy problems,
+and are slower on large problems due to having to store and lookup
+the indices (causes cache misses).
+In particular, I removed jtree_ndx_inf_eng and jtree_ndx_dbn_inf_eng, all the *ndx*
+routines from potentials/Tables, and all the UID stuff from
+add_BNT_to_path,
+thus simplifying the code.
+This required fixing hmm_(2TBN)_inf_engine/marginal_nodes\family,
+and updating installC.
+
+
+
Removed jtree_C_inf_engine and jtree_C_dbn_inf_engine.
+The former is basically the same as using jtree_inf_engine with
+mutliply_by_table.c and marginalize_table.c.
+The latter benefited slightly by assuming potentials were tables
+(arrays not objects), but these negligible savings don't justify the
+complexity and code duplication.
+
+
Removed stab_cond_gauss_inf_engine and
+scg_unrolled_dbn_inf_engine,
+written by shan.huang@intel.com, since the code was buggy.
+
+
Removed potential_engine, which was only experimental anyway.
+
+
+
+
+
+
13 Nov 02
+
+
Released version 5.
+The previous version, released on 7/28/02, is available
+here.
+
+
Moved code and documentation to MIT.
+
+
Added repmat.c from Thomas Minka's lightspeed library.
+Modified it so it can return an empty matrix.
+
+
Tomas Kocka fixed bug in the BDeu option for tabular_CPD,
+and contributed graph/dag_to_eg, to convert to essential graphs.
+
+
+
+
Modified definition of hhmmQ_CPD, so that Qps can now accept
+parents in either the current or previous slice.
+
+
Added hhmm2Q_CPD class, which is simpler than hhmmQ (no embedded
+sub CPDs, etc), and which allows the conditioning parents, Qps, to
+be before (in the topological ordering) the F or Q(t-1) nodes.
+See BNT/examples/dynamic/HHMM/Map/mk_map_hhmm for an example.
+
+
+
+
7/28/02
+
+
Changed graph/best_first_elim_order from min-fill to min-weight.
+
Ernest Chan fixed bug in Kalman/sample_lds (G{i} becomes G{m} in
+line 61).
+
Tal Blum fixed bug in HMM/init_ghmm (Q
+becomes K, the number of states).
+
Fixed jtree_2tbn_inf_engine/set_fields so it correctly sets the
+maximize flag to 1 even in subengines.
+
+ Gary Bradski did a simple mod to the PC struct learn alg so you can pass it an
+adjacency matrix as a constraint. Also, CovMat.m reads a file and
+produces a covariance matrix.
+
KNOWN BUG in CPDs/@hhmmQ_CPD/update_ess.m at line 72 caused by
+examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m at line 57.
+
+The old version is available from www.cs.berkeley.edu/~murphyk/BNT.24june02.zip
+
+
+
+
6/24/02
+
+
Renamed dag_to_dot as graph_to_dot and added support for
+undirected graphs.
+
Changed syntax for HHMM CPD constructors: no need to specify d/D
+anymore, so they can be used for more complex models.
+
Removed redundant first argument to mk_isolated_tabular_CPD.
+
+
+
+
6/19/02
+
+
+Fixed most probable explanation code.
+Replaced calc_mpe with find_mpe, which is now a method of certain
+inference engines, e.g., jtree, belprop.
+calc_mpe_global has become the find_mpe method of global_joint.
+calc_mpe_bucket has become the find_mpe method of var_elim.
+calc_mpe_dbn has become the find_mpe method of smoother.
+These routines now correctly find the jointly most probable
+explanation, instead of the marginally most probable assignments.
+See examples/static/mpe1\mpe2 and examples/dynamic/viterbi1
+for examples.
+Removed maximize flag from constructor and enter_evidence
+methods, since this no longer needs to be specified by the user.
+
+
Rainer Deventer fixed in a bug in
+CPDs/@gaussian_CPD/update_ess.m:
+now, hidden_cps = any(hidden_bitv(cps)), whereas it used to be
+hidden_cps = all(hidden_bitv(cps)).
+
+
+
+
+
5/29/02
+
+
CPDs/@gaussian_CPD/udpate_ess.m fixed WX,WXX,WXY (thanks to Rainer Deventer and
+Yohsuke Minowa for spotting the bug). Does the C version work??
+
potentials/@cpot/mpot_to_cpot fixed K==0 case (thanks to Rainer Deventer).
+
CPDs/@gaussian_CPD/log_prob_node now accepts non-cell array data
+on self (thanks to rishi for catching this).
+
+
+
+
5/19/02
+
+
+
+
+
Wei Hu made the following changes.
+
+
Memory leak repair:
+ a. distribute_evidence.c in static/@jtree_C directory
+ b. distribute_evidence.c in static/@jtree_ndx directory
+ c. marg_tablec. in Tables dir
+
+
Add "@jtree_ndx_2TBN_inf_engine" in inference/online dir
+
+
Add "@jtree_sparse_inf_engine" in inference/static dir
+
+
Add "@jtree_sparse_2TBN_inf_engine" in inference/online dir
+
+
Modify "tabular_CPD.m" in CPDs/@tabular_CPD dir , used for sparse
+
+
In "@discrete_CPD" dir:
+ a. modify "convert_to_pot.m", used for sparse
+ b. add "convert_to_sparse_table.c"
+
+
In "potentials/@dpot" dir:
+ a. remove "divide_by_pot.c" and "multiply_by_pot.c"
+ b. add "divide_by_pot.m" and "multiply_by_pot.m"
+ c. modify "dpot.m", "marginalize_pot.m" and "normalize_pot.m"
+
+
In "potentials/Tables" dir:
+ a. modify mk_ndxB.c;(for speedup)
+ b. add "mult_by_table.m",
+ "divide_by_table.m",
+ "divide_by_table.c",
+ "marg_sparse_table.c",
+ "mult_by_sparse_table.c",
+ "divide_by_sparse_table.c".
+
+
Modify "normalise.c" in misc dir, used for sparse.
+
+
And, add discrete2, discrete3, filter2 and filter3 as test applications in test_BNT.m
+Modify installC.m
+
+
+
Kevin made the following changes related to strong junction
+trees:
+
+
jtree_inf_engin line 75:
+engine.root_clq = length(engine.cliques);
+the last clq is guaranteed to be a strong root
+
+
strong_elim_order: use Ilya's code instead of topological sorting.
+
+
+
Kevin fixed CPDs/@generic_CPD/learn_params, so it always passes
+in the correct hidden_bitv field to update_params.
+
+
.
+
+
+
5/8/02
+
+
+
Jerod Weinman helped fix some bugs in HHMMQ_CPD/maximize_params.
+
+
Removed broken online inference from hmm_inf_engine.
+It has been replaced filter_inf_engine, which can take hmm_inf_engine
+as an argument.
+
+
Changed graph visualization function names.
+'draw_layout' is now 'draw_graph',
+'draw_layout_dbn' is now 'draw_dbn',
+'plotgraph' is now 'dag_to_dot',
+'plothmm' is now 'hmm_to_dot',
+added 'dbn_to_dot',
+'mkdot' no longer exists: its functionality has been subsumed by dag_to_dot.
+The dot functions now all take optional args in string/value format.
+
+
+
+
4/1/02
+
+
Added online inference classes.
+See BNT/inference/online and BNT/examples/dynamic/filter1.
+This is work in progress.
+
Renamed cmp_inference to cmp_inference_dbn, and made its
+ interface and behavior more similar to cmp_inference_static.
+
Added field rep_of_eclass to bnet and dbn, to simplify
+parameter tying (see ~murphyk/Bayes/param_tieing.html).
+
+ Added gmux_CPD (Gaussian multiplexers).
+See BNT/examples/dynamic/SLAM/skf_data_assoc_gmux for an example.
+
Modified the forwards sampling routines.
+general/sample_dbn and sample_bnet now take optional arguments as
+strings, and can sample with pre-specified evidence.
+sample_bnet can only generate a single sample, and it is always a cell
+array.
+sample_node can only generate a single sample, and it is always a
+scalar or vector.
+This eliminates the false impression that the function was
+ever vectorized (which was only true for tabular_CPDs).
+(Calling sample_bnet inside a for-loop is unlikely to be a bottleneck.)
+
Updated usage.html's description of CPDs (gmux) and inference
+(added gibbs_sampling and modified the description of pearl).
+
Modified BNT/Kalman/kalman_filter\smoother so they now optionally
+take an observed input (control) sequence.
+Also, optional arguments are now passed as strings.
+
Removed BNT/examples/static/uci_data to save space.
+
+
+
3/14/02
+
+
pearl_inf_engine now works for (vector) Gaussian nodes, as well
+as discrete. compute_pi has been renamed CPD_to_pi. compute_lambda_msg
+ has been renamed CPD_to_lambda_msg. These are now implemented for
+ the discrete_CPD class instead of tabular_CPD. noisyor and
+ Gaussian have their own private implementations.
+Created examples/static/Belprop subdirectory.
+
Added examples/dynamic/HHMM/Motif.
+
Added Matt Brand's entropic prior code.
+
cmp_inference_static has changed. It no longer returns err. It
+ can check for convergence. It can accept 'observed'.
+
+
+
+
3/4/02
+
+
Fixed HHMM code. Now BNT/examples/dynamic/HHMM/mk_abcd_hhmm
+implements the example in the NIPS paper. See also
+Square/sample_square_hhmm_discrete and other files.
+
+
Included Bhaskara Marthi's gibbs_sampling_inf_engine. Currently
+this only works if all CPDs are tabular and if you call installC.
+
+
Modified Kalman/tracking_demo so it calls plotgauss2d instead of
+ gaussplot.
+
+
Included Sonia Leach's speedup of mk_rnd_dag.
+My version created all NchooseK subsets, and then picked among them. Sonia
+reorders the possible parents randomly and chooses
+the first k. This saves on having to enumerate the large number of
+possible subsets before picking from one.
+
+
Eliminated BNT/inference/static/Old, which contained some old
+.mexglx files which wasted space.
+
+
+
+
+
2/15/02
+
+
Removed the netlab directory, since most of it was not being
+used, and it took up too much space (the goal is to have BNT.zip be
+less than 1.4MB, so it fits on a floppy).
+The required files have been copied into netlab2.
+
+
+
2/14/02
+
+
Shan Huang fixed most (all?) of the bugs in his stable CG code.
+scg1-3 now work, but scg_3node and scg_unstable give different
+ behavior than that reported in the Cowell book.
+
+
I changed gaussplot so it plots an ellipse representing the
+ eigenvectors of the covariance matrix, rather than numerically
+ evaluating the density and using a contour plot; this
+ is much faster and gives better pictures. The new function is
+ called plotgauss2d in BNT/Graphics.
+
+
Joni Alon fixed some small bugs:
+ mk_dhmm_obs_lik called forwards with the wrong args, and
+ add_BNT_to_path should quote filenames with spaces.
+
+
I added BNT/stats2/myunidrnd which is called by learn_struct_mcmc.
+
+
I changed BNT/potentials/@dpot/multiply_by_dpot so it now says
+Tbig.T(:) = Tbig.T(:) .* Ts(:);
+
+
+
+
2/6/02
+
+
Added hierarchical HMMs. See BNT/examples/dynamic/HHMM and
+CPDs/@hhmmQ_CPD and @hhmmF_CPD.
+
sample_dbn can now sample until a certain condition is true.
+
Sonia Leach fixed learn_struct_mcmc and changed mk_nbrs_of_digraph
+so it only returns DAGs.
+Click here for details of her changes.
+
+
+
+
2/4/02
+
+
Wei Hu fixed a bug in
+jtree_ndx_inf_engine/collect\distribute_evidence.c which failed when
+maximize=1.
+
+I fixed various bugs to do with conditional Gaussians,
+so mixexp3 now works (thanks to Gerry Fung
+ for spotting the error). Specifically:
+Changed softmax_CPD/convert_to_pot so it now puts cts nodes in cdom, and no longer inherits
+ this function from discrete_CPD.
+ Changed root_CPD/convert_to_pot so it puts self in cdom.
+
+
+
+
1/31/02
+
+
Fixed log_lik_mhmm (thanks to ling chen
+for spotting the typo)
+
Now many scripts in examples/static call cmp_inference_static.
+Also, SCG scripts have been simplified (but still don't work!).
+
belprop and belprop_fg enter_evidence now returns [engine, ll,
+ niter], with ll=0, so the order of the arguments is compatible with other engines.
+
Ensured that all enter_evidence methods support optional
+ arguments such as 'maximize', even if they ignore them.
+
Added Wei Hu's potentials/Tables/rep_mult.c, which is used to
+ totally eliminate all repmats from gaussian_CPD/update_ess.
+
+
+
+
1/30/02
+
+
update_ess now takes hidden_bitv instead of hidden_self and
+hidden_ps. This allows gaussian_CPD to distinguish hidden discrete and
+cts parents. Now learn_params_em, as well as learn_params_dbn_em,
+ passes in this info, for speed.
+
+
gaussian_CPD update_ess is now vectorized for any case where all
+ the continuous nodes are observed (eg., Gaussian HMMs, AR-HMMs).
+
+
mk_dbn now automatically detects autoregressive nodes.
+
+
hmm_inf_engine now uses indexes in marginal_nodes/family for
+ speed. Marginal_nodes can now only handle single nodes.
+ (SDndx is hard-coded, to avoid the overhead of using marg_ndx,
+ which is slow because of the case and global statements.)
+
+
add_ev_to_dmarginal now retains the domain field.
+
+
Wei Hu wrote potentials/Tables/repmat_and_mult.c, which is used to
+ avoid some of the repmat's in gaussian_CPD/update_ess.
+
+
+ installC now no longer sets the global USEC, since USEC is set to 0
+ by add_BNT_to_path, even if the C files have already been compiled
+ in a previous session. Instead, gaussian_CPD checks to
+ see if repmat_and_mult exists, and (bat1, chmm1, water1, water2)
+ check to see if jtree_C_inf_engine/collect_evidence exists.
+ Note that checking if a file exists is slow, so we do the check
+ inside the gaussian_CPD constructor, not inside update_ess.
+
+
uninstallC now deletes both .mex and .dll files, just in case I
+ accidently ship a .zip file with binaries. It also deletes mex
+ files from jtree_C_inf_engine.
+
+
Now marginal_family for both jtree_limid_inf_engine and
+ global_joint_inf_engine returns a marginal structure and
+ potential, as required by solve_limid.
+ Other engines (eg. jtree_ndx, hmm) are not required to return a potential.
+
+
+
+
+
1/22/02
+
+
Added an optional argument to mk_bnet and mk_dbn which lets you
+add names to nodes. This uses the new assoc_array class.
+
+
Added Yimin Zhang's (unfinished) classification/regression tree
+code to CPDs/tree_CPD.
+
+
+
+
+
+
1/14/02
+
+
Incorporated some of Shan Huang's (still broken) stable CG code.
+
+
+
+
1/9/02
+
+
Yimin Zhang vectorized @discrete_CPD/prob_node, which speeds up
+structure learning considerably. I fixed this to handle softmax CPDs.
+
+
Shan Huang changed the stable conditional Gaussian code to handle
+vector-valued nodes, but it is buggy.
+
+
I vectorized @gaussian_CPD/update_ess for a special case.
+
+
Removed denom=min(1, ... Z) from gaussian_CPD/maximize_params
+(added to cope with negative temperature for entropic prior), which
+gives wrong results on mhmm1.
+
+
+
+
1/7/02
+
+
+
Removed the 'xo' typo from mk_qmr_bnet.
+
+
convert_dbn_CPDs_to_tables has been vectorized; it is now
+substantially faster to compute the conditional likelihood for long sequences.
+
+
Simplified constructors for tabular_CPD and gaussian_CPD, so they
+now both only take the form CPD(bnet, i, ...) for named arguments -
+the CPD('self', i, ...) format is gone. Modified mk_fgraph_given_ev
+to use mk_isolated_tabular_CPD instead.
+
+
Added entropic prior to tabular and Gaussian nodes.
+For tabular_CPD, changed name of arguments to the constructor to
+distinguish Dirichlet and entropic priors. In particular,
+tabular_CPD(bnet, i, 'prior', 2) is now
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_weight', 2).
+
+
Added deterministic annealing to learn_params_dbn_em for use with
+entropic priors. The old format learn(engine, cases, max_iter) has
+been replaced by learn(engine, cases, 'max_iter', max_iter).
+
+
Changed examples/dynamic/bat1 and kjaerulff1, since default
+equivalence classes have changed from untied to tied.
+
+
+
12/30/01
+
+
DBN default equivalence classes for slice 2 has changed, so that
+now parameters are tied for nodes with 'equivalent' parents in slices
+1 and 2 (e.g., observed leaf nodes). This essentially makes passing in
+the eclass arguments redundant (hooray!).
+
+
+
+
12/20/01
+
+
Released version 4.
+Version 4 is considered a major new release
+since it is not completely backwards compatible with V3.
+Observed nodes are now specified when the bnet/dbn is created,
+not when the engine is created. This changes the interface to many of
+the engines, making the code no longer backwards compatible.
+Hence support for non-named optional arguments (BNT2 style) has also
+been removed; hence mk_dbn etc. requires arguments to be passed by name.
+
+
Ilya Shpitser's C code for triangulation now compiles under
+Windows as well as Unix, thanks to Wei Hu.
+
+
All the ndx engines have been combined, and now take an optional
+argument specifying what kind of index to use.
+
+
learn_params_dbn_em is now more efficient:
+@tabular_CPD/update_ess for nodes whose families
+are hidden does not need to call add_evidence_to_dmarginal, which
+is slow.
+
+
Wei Hu fixed bug in jtree_ndxD, so now the matlab and C versions
+both work.
+
+
dhmm_inf_engine replaces hmm_inf_engine, since the former can
+handle any kind of topology and is slightly more efficient. dhmm is
+extended to handle Gaussian, as well as discrete,
+observed nodes. The new hmm_inf_engine no longer supports online
+inference (which was broken anyway).
+
+
Added autoregressive HMM special case to hmm_inf_engine for
+speed.
+
+
jtree_ndxSD_dbn_inf_engine now computes likelihood of the
+evidence in a vectorized manner, where possible, just like
+hmm_inf_engine.
+
+
Added mk_limid, and hence simplified mk_bnet and mk_dbn.
+
+
+
Gaussian_CPD now uses 0.01*I prior on covariance matrix by
+default. To do ML estimation, set 'cov_prior_weight' to 0.
+
+
Gaussian_CPD and tabular_CPD
+optional binary arguments are now set using 0/1 rather than 'no'/'yes'.
+
+
Removed Shan Huang's PDAG and decomposable graph code, which will
+be put in a separate structure learning library.
+
+
+
+
12/11/01
+
+
Wei Hu fixed jtree_ndx*_dbn_inf_engine and marg_table.c.
+
+
Shan Huang contributed his implementation of stable conditional
+Gaussian code (Lauritzen 1999), and methods to search through the
+space of PDAGs (Markov equivalent DAGs) and undirected decomposable
+graphs. The latter is still under development.
+
+
+
+
12/10/01
+
+
Included Wei Hu's new versions of the ndx* routines, which use
+integers instead of doubles. The new versions are about 5 times faster
+in C. In general, ndxSD is the best choice.
+
+
Fixed misc/add_ev_to_dmarginal so it works with the ndx routines
+in bat1.
+
+
Added calc_mpe_dbn to do Viterbi parsing.
+
+
Updated dhmm_inf_engine so it computes marginals.
+
+
+
+
+
11/23/01
+
+
learn_params now does MAP estimation (i.e., uses Dirichlet prior,
+if defined). Thanks to Simon Keizer skeizer@cs.utwente.nl for spotting
+this.
+
Changed plotgraph so it calls ghostview with the output of dotty,
+instead of converting from .ps to .tif. The resulting image is much
+easier to read.
+
Fixed cgpot/multiply_by_pots.m.
+
Wei Hu fixed ind2subv.c.
+
Changed arguments to compute_joint_pot.
+
+
+
+
11/1/01
+
+
Changed sparse to dense in @dpot/multiply_pots, because sparse
+arrays apparently cause a bug in the NT version of Matlab.
+
+
Fixed the bug in gaussian_CPD/log_prob_node.m which
+incorrectly called the vectorized gaussian_prob with different means
+when there were continuous parents and more than one case.
+(Thanks to Dave Andre for finding this.)
+
+
Fixed the bug in root_CPD/convert_to_pot which did not check for
+pot_type='g'.
+(Thanks to Dave Andre for finding this.)
+
+
Changed calc_mpe and calc_mpe_global so they now return a cell array.
+
+
Combine pearl and loopy_pearl into a single inference engine
+called 'pearl_inf_engine', which now takes optional arguments passed
+in using the name/value pair syntax.
+marginal_nodes/family now takes the optional add_ev argument (same as
+jtree), which is the opposite of the previous shrink argument.
+
+
Created pearl_unrolled_dbn_inf_engine and "resurrected"
+pearl_dbn_inf_engine in a simplified (but still broken!) form.
+
+
Wei Hu fixed the bug in ind2subv.c, so now ndxSD works.
+He also made C versions of ndxSD and ndxB, and added (the unfinished) ndxD.
+
+
+
+
+
10/20/01
+
+
+
Removed the use_ndx option from jtree_inf,
+and created 2 new inference engines: jtree_ndxSD_inf_engine and
+jtree_ndxB_inf_engine.
+The former stores 2 sets of indices for the small and difference
+domains; the latter stores 1 set of indices for the big domain.
+In Matlab, the ndxB version is often significantly faster than ndxSD
+and regular jtree, except when the clique size is large.
+When compiled to C, the difference between ndxB and ndxSD (in terms of
+speed) vanishes; again, both are faster than compiled jtree, except
+when the clique size is large.
+Note: ndxSD currently has a bug in it, so it gives the wrong results!
+(The DBN analogs are jtree_dbn_ndxSD_inf_engine and
+jtree_dbn_ndxB_inf_engine.)
+
+
Removed duplicate files from the HMM and Kalman subdirectories.
+e.g., normalise is now only in BNT/misc, so when compiled to C, it
+masks the unique copy of the Matlab version.
+
+
+
+
+
10/17/01
+
+
Fixed bugs introduced on 10/15:
+Renamed extract_gaussian_CPD_params_given_ev_on_dps.m to
+gaussian_CPD_params_given_dps.m since Matlab can't cope with such long
+names (this caused cg1 to fail). Fixed bug in
+gaussian_CPD/convert_to_pot, which now calls convert_to_table in the
+discrete case.
+
+
Fixed bug in bk_inf_engine/marginal_nodes.
+The test 'if nodes < ss' is now
+'if nodes <= ss' (bug fix due to Stephen seg_ma@hotmail.com)
+
+
Simplified uninstallC.
+
+
+
+
10/15/01
+
+
+
Added use_ndx option to jtree_inf and jtree_dbn_inf.
+This pre-computes indices for multiplying, dividing and marginalizing
+discrete potentials.
+This is like the old jtree_fast_inf_engine, but we use an extra level
+of indirection to reduce the number of indices needed (see
+uid_generator object).
+Sometimes this is faster than the original way...
+This is work in progress.
+
+
The constructor for dpot no longer calls myreshape, which is very
+slow.
+But new dpots still must call myones.
+Hence discrete potentials are only sometimes 1D vectors (but should
+always be thought of as multi-D arrays). This is work in progress.
+
+
+
+
10/6/01
+
+
Fixed jtree_dbn_inf_engine, and added kjaerulff1 to test this.
+
Added option to jtree_inf_engine/marginal_nodes to return "full
+sized" marginals, even on observed nodes.
+
Clustered BK in examples/dynamic/bat1 seems to be broken,
+so it has been commented out.
+BK will be re-implemented on top of jtree_dbn, which should be much more
+efficient.
+
+
+
9/25/01
+
+
jtree_dbn_inf_engine is now more efficient than calling BK with
+clusters = exact, since it only uses the interface nodes, instead of
+all of them, to maintain the belief state.
+
Uninstalled the broken C version of strong_elim_order.
+
Changed order of arguments to unroll_dbn_topology, so that intra1
+is no longer required.
+
Eliminated jtree_onepass, which can be simulated by calling
+collect_evidence on jtree.
+
online1 is no longer in the test_BNT suite, since there is some
+problem with online prediction with mixtures of Gaussians using BK.
+This functionality is no longer supported, since doing it properly is
+too much work.
+
+
+
+
9/7/01
+
+
Added Ilya Shpitser's C triangulation code (43x faster!).
+Currently this only compiles under linux; windows support is being added.
+
+
+
+
9/5/01
+
+
Fixed typo in CPDs/@tabular_kernel/convert_to_table (thanks,
+Philippe!)
+
Fixed problems with clamping nodes in tabular_CPD, learn_params,
+learn_params_tabular, and bayes_update_params. See
+BNT/examples/static/learn1 for a demo.
+
+
+
+
9/3/01
+
+
Fixed typo on line 87 of gaussian_CPD which caused error in cg1.m
+
Installed Wei Hu's latest version of jtree_C_inf_engine, which
+can now compute marginals on any clique/cluster.
+
Added Yair Weiss's code to compute the Bethe free energy
+approximation to the log likelihood in loopy_pearl (still need to add
+this to belprop). The return arguments are now: engine, loglik and
+niter, which is different than before.
+
+
+
+
+
8/30/01
+
+
Fixed bug in BNT/examples/static/id1 which passed hard-coded
+directory name to belprop_inf_engine.
+
+
Changed tabular_CPD and gaussian_CPD so they can now be created
+without having to pass in a bnet.
+
+
Finished mk_fgraph_given_ev. See the fg* files in examples/static
+for demos of factor graphs (work in progress).
+
+
+
+
+
8/22/01
+
+
+
Removed jtree_compiled_inf_engine,
+since the C code it generated was so big that it would barf on large
+models.
+
+
Tidied up the potentials/Tables directory.
+Removed mk_marg/mult_ndx.c,
+which have been superseded by the much faster mk_marg/mult_index.c
+(written by Wei Hu).
+Renamed the Matlab versions mk_marginalise/multiply_table_ndx.m
+to be mk_marg/mult_index.m to be compatible with the C versions.
+Note: nobody calls these routines anymore!
+(jtree_C_inf_engine/enter_softev.c has them built-in.)
+Removed mk_ndx.c, which was only used by jtree_compiled.
+Removed mk_cluster_clq_ndx.m, mk_CPD_clq_ndx, and marginalise_table.m
+which were not used.
+Moved shrink_obs_dims_in_table.m to misc.
+
+
In potentials/@dpot directory: removed multiply_by_pot_C_old.c.
+Now marginalize_pot.c can handle maximization,
+and divide_by_pot.c has been implemented.
+marginalize/multiply/divide_by_pot.m no longer have useC or genops options.
+(To get the C versions, use installC.m)
+
+
Removed useC and genops options from jtree_inf_engine.m
+To use the C versions, install the C code.
+
+
Updated BNT/installC.m.
+
+
Added fclose to @loopy_pearl_inf/enter_evidence.
+
+
Changes to MPE routines in BNT/general.
+The maximize parameter is now specified inside enter_evidence
+instead of when the engine is created.
+Renamed calc_mpe_given_inf_engine to just calc_mpe.
+Added Ron Zohar's optional fix to handle the case of ties.
+Now returns log-likelihood instead of likelihood.
+Added calc_mpe_global.
+Removed references to genops in calc_mpe_bucket.m
+Test file is now called mpe1.m
+
+
For DBN inference, filter argument is now passed by name,
+as is maximize. This is NOT BACKWARDS COMPATIBLE.
+
+
Removed @loopy_dbn_inf_engine, which was too complicated.
+In the future, a new version, which applies static loopy to the
+unrolled DBN, will be provided.
+
+
discrete_CPD class now contains the family sizes and supports the
+method dom_sizes. This is because it could not access the child field
+CPD.sizes, and mysize(CPT) may give the wrong answer.
+
+
Removed all functions of the form CPD_to_xxx, where xxx = dpot, cpot,
+cgpot, table, tables. These have been replaced by convert_to_pot,
+which takes a pot_type argument.
+@discrete_CPD calls convert_to_table to implement a default
+convert_to_pot.
+@discrete_CPD calls CPD_to_CPT to implement a default
+convert_to_table.
+The convert_to_xxx routines take fewer arguments (no need to pass in
+the globals node_sizes and cnodes!).
+Eventually, convert_to_xxx will be vectorized, so it will operate on
+all nodes in the same equivalence class "simultaneously", which should
+be significantly quicker, at least for Gaussians.
+
+
Changed discrete_CPD/sample_node and prob_node to use
+convert_to_table, instead of CPD_to_CPT, so mlp/softmax nodes can
+benefit.
+
+
Removed @tabular_CPD/compute_lambda_msg_fast and
+private/prod_CPD_and_pi_msgs_fast, since no one called them.
+
+
Renamed compute_MLE to learn_params,
+by analogy with bayes_update_params (also because it may compute a
+MAP estimate).
+
+
Renamed set_params to set_fields
+and get_params to get_field for CPD and dpot objects, to
+avoid confusion with the parameters of the CPD.
+
+
Removed inference/doc, which has been superseded
+by the web page.
+
+
Removed inference/static/@stab_cond_gauss_inf_engine, which is
+broken, and all references to stable CG.
+
+
+
+
+
+
+
+
8/12/01
+
+
I removed potentials/@dpot/marginalize_pot_max.
+Now marginalize_pot for all potential classes take an optional third
+argument, specifying whether to sum out or max out.
+The dpot class also takes in optional arguments specifying whether to
+use C or genops (the global variable USE_GENOPS has been eliminated).
+
+
potentials/@dpot/marginalize_pot has been simplified by assuming
+that 'onto' is always in ascending order (i.e., we remove
+Maynard-Reid's patch). This is to keep the code identical to the C
+version and the other class implementations.
+
+
Added Ron Zohar's general/calc_mpe_bucket function,
+and my general/calc_mpe_given_inf_engine, for calculating the most
+probable explanation.
+
+
+
Added Wei Hu's jtree_C_inf_engine.
+enter_softev.c is about 2 times faster than enter_soft_evidence.m.
+
+
Added the latest version of jtree_compiled_inf_engine by Wei Hu.
+The 'C' ndx_method now calls potentials/Tables/mk_marg/mult_index,
+and the 'oldC' ndx_method calls potentials/Tables/mk_marg/mult_ndx.
+
+
Added potentials/@dpot/marginalize_pot_C.c and
+multiply_by_pot_C.c by Wei Hu.
+These can be called by setting the 'useC' argument in
+jtree_inf_engine.
+
+
Added BNT/installC.m to compile all the mex files.
+
+
Renamed prob_fully_instantiated_bnet to log_lik_complete.
+
+
Added the latest version of jtree_compiled_inf_engine by Wei Hu.
+
Added the genops class by Doug Schwarz (see
+BNT/genopsfun/README). This provides a 1-2x speed-up of
+potentials/@dpot/multiply_by_pot and divide_by_pot.
+
The function BNT/examples/static/qmr_compiled compares the
+performance gains of these new functions.
+
+
+
7/6/01
+
+
Made bk_inf_engine use the name/value argument syntax. This can
+now do max-product (Viterbi) as well as sum-product
+(forward-backward).
+
Changed examples/static/mfa1 to use the new name/value argument
+syntax.
+
+
+
+
6/28/01
+
+
+
+
Released version 3.
+Version 3 is considered a major new release
+since it is not completely backwards compatible with V2.
+V3 supports decision and utility nodes, loopy belief propagation on
+general graphs (including undirected), structure learning for non-tabular nodes,
+a simplified way of handling optional
+arguments to functions,
+and many other features which are described below.
+In addition, the documentation has been substantially rewritten.
+
+
The following functions can now take optional arguments specified
+as name/value pairs, instead of passing arguments in a fixed order:
+mk_bnet, jtree_inf_engine, tabular_CPD, gaussian_CPD, softmax_CPD, mlp_CPD,
+enter_evidence.
+This is very helpful if you want to use default values for most parameters.
+The functions remain backwards compatible with BNT2.
+
+
dsoftmax_CPD has been renamed softmax_CPD, replacing the older
+version of softmax. The directory netlab2 has been updated, and
+contains weighted versions of some of the learning routines in netlab.
+(This code is still being developed by P. Brutti.)
+
+
The "fast" versions of the inference engines, which generated
+matlab code, have been removed.
+@jtree_compiled_inf_engine now generates C code.
+(This feature is currently being developed by Wei Hu of Intel (China),
+and is not yet ready for public use.)
+
+
CPD_to_dpot, CPD_to_cpot, CPD_to_cgpot and CPD_to_upot
+are in the process of being replaced by convert_to_pot.
+
+
determine_pot_type now takes as arguments (bnet, onodes)
+instead of (onodes, cnodes, dag),
+so it can detect the presence of utility nodes as well as continuous
+nodes.
+Hence this function is not backwards compatible with BNT2.
+
+
The structure learning code (K2, mcmc) now works with any node
+type, not just tabular.
+mk_bnets_tabular has been eliminated.
+bic_score_family and dirichlet_score_family will be replaced by score_family.
+Note: learn_struct_mcmc has a new interface that is not backwards
+compatible with BNT2.
+
+
update_params_complete has been renamed bayes_update_params.
+Also, learn_params_tabular has been replaced by learn_params, which
+works for any CPD type.
+
+
Added decision/utility nodes.
+
+
+
+
6/6/01
+
+
Added soft evidence to jtree_inf_engine.
+
Changed the documentation slightly (added soft evidence and
+parameter tying, and separated parameter and structure learning).
+
Changed the parameters of determine_pot_type, so it no longer
+needs to be passed a DAG argument.
+
Fixed parameter tying in mk_bnet (num. CPDs now equals num. equiv
+classes).
+
Made learn_struct_mcmc work in matlab version 5.2 (thanks to
+Nimrod Megiddo for finding this bug).
+
Made 'acyclic.m' work for undirected graphs.
+
+
+
+
5/23/01
+
+
Added Tamar Kushnir's code for the IC* algorithm
+(learn_struct_pdag_ic_star). This learns the
+structure of a PDAG, and can identify the presence of latent
+variables.
+
+
Added Yair Weiss's code for computing the MAP assignment using
+junction tree (i.e., a new method called @dpot/marginalize_pot_max
+instead of marginalize_pot.)
+
+
Added @discrete_CPD/prob_node in addition to log_prob_node to handle
+deterministic CPDs.
+
+
+
+
5/12/01
+
+
Pierpaolo Brutti updated his mlp and dsoftmax CPD classes,
+and improved the HME code.
+
+
HME example now added to web page. (The previous example was non-hierarchical.)
+
+
Philippe Leray (author of the French documentation for BNT)
+pointed out that I was including netlab.tar unnecessarily.
+
+
+
+
5/4/01
+
+
Added mlp_CPD which defines a CPD as a (conditional) multi-layer perceptron.
+This class was written by Pierpaolo Brutti.
+
+
Added hierarchical mixtures of experts demo (due to Pierpaolo Brutti).
+
+
Fixed some bugs in dsoftmax_CPD.
+
+
Now the BNT distribution includes the whole
+Netlab library in a
+subdirectory.
+It also includes my HMM and Kalman filter toolboxes, instead of just
+fragments of them.
+
+
+
+
5/2/01
+
+
gaussian_inf_engine/enter_evidence now correctly returns the
+loglik, even if all nodes are instantiated (bug fix due to
+Michael Robert James).
+
+
Added dsoftmax_CPD which allows softmax nodes to have discrete
+and continuous parents; the discrete parents act as indices into the
+parameters for the continuous node, by analogy with conditional
+Gaussian nodes. This class was written by Pierpaolo Brutti.
+
+
+
+
3/27/01
+
+
learn_struct_mcmc no longer returns sampled_bitv.
+
Added mcmc_sample_to_hist to post-process the set of samples.
+
+
+
3/21/01
+
+
Changed license from UC to GNU Library GPL.
+
+
Made all CPD constructors accept 0 arguments, so now bnets can be
+saved to and loaded from files.
+
+
Improved the implementation of sequential and batch Bayesian
+parameter learning for tabular CPDs with completely observed data (see
+log_marg_lik_complete and update_params_complete). This code also
+handles interventional data.
+
+
Started implementing Bayesian estimation of linear Gaussian
+nodes. See root_gaussian_CPD and
+linear_gaussian_CPD. The old gaussian_CPD class has not been changed.
+
+
Renamed evaluate_CPD to log_prob_node, and simplified its
+arguments.
+
+
Renamed sample_CPD to sample_node, simplified its
+arguments, and vectorized it.
+
+
Renamed "learn_params_tabular" to "update_params_complete".
+This does Bayesian updating, but no longer computes the BIC score.
+
+
Made routines for completely observed networks (sampling,
+complete data likelihood, etc.) handle cell arrays or regular arrays,
+which are faster.
+If some nodes are not scalars, or are hidden, you must use cell arrays.
+You must convert to a cell array before passing to an inference routine.
+
+
Fixed bug in gaussian_CPD constructor. When creating CPD with
+more than 1 discrete parent with random parameters, the matrices were
+the wrong shape (Bug fix due to Xuejing Sun).
+
+
+
+
+
11/24/00
+
+
Renamed learn_params and learn_params_dbn to learn_params_em/
+learn_params_dbn_em. The return arguments are now [bnet, LLtrace,
+engine] instead of [engine, LLtrace].
+
Added structure learning code for static nets (K2, PC).
+
Renamed learn_struct_inter_full_obs as learn_struct_dbn_reveal,
+and reimplemented it to make it simpler and faster.
+
Added the factored frontier and loopy_dbn algorithms.
+
Separated the online user manual into two, for static and dynamic
+networks.
+
+
Added a counter to the BNT web page.
+
+
+
+
+
4/27/00
+
+
Fixed the typo in bat1.m
+
Added preliminary code for online inference in DBNs
+
Added coupled HMM example
+
+
+
4/23/00
+
+
Fixed the bug in the fast inference routines where the indices
+are empty (arises in bat1.m).
+
Sped up marginal_family for the fast engines by precomputing indices.
+
+
+
4/17/00
+
+
Simplified implementation of BK_inf_engine by using soft evidence.
+
Added jtree_onepass_inf_engine (which computes a single marginal)
+and modified jtree_dbn_fast to use it.
+
+
+
4/14/00
+
+
Added fast versions of jtree and BK, which are
+designed for models where the division into hidden/observed is fixed,
+and all hidden variables are discrete. These routines are 2-3 times
+faster than their non-fast counterparts.
+
+
Added graph drawing code
+contributed by Ali Taylan Cemgil from the University of Nijmegen.
+
+
+
4/10/00
+
+
Distinguished cnodes and cnodes_slice in DBNs so that kalman1
+works with BK.
+
Removed dependence on cellfun (which only exists in matlab 5.3)
+by adding isemptycell. Now the code works in 5.2.
+
Changed the UC copyright notice.
+
+
+
+
+
3/29/00
+
+
Released BNT 2.0, now with objects!
+Here are the major changes.
+
+
There are now 3 classes of objects in BNT:
+Conditional Probability Distributions, potentials (for junction tree),
+and inference engines.
+Making an inference algorithm (junction tree, sampling, loopy belief
+propagation, etc.) an object might seem counter-intuitive, but in
+fact turns out to be a good idea, since the code and documentation
+can be made modular.
+(In Java, each algorithm would be a class that implements the
+inferenceEngine interface. Since Matlab doesn't support interfaces,
+inferenceEngine is an abstract (virtual) base class.)
+
+
+
+In version 1, instead of Matlab's built-in objects,
+I used structs and a
+ simulated dispatch mechanism based on the type-tag system in the
+ classic textbook by Abelson
+ and Sussman ("Structure and Interpretation of Computer Programs",
+ MIT Press, 1985). This required editing the dispatcher every time a
+ new object type was added. It also required unique (and hence long)
+ names for each method, and allowed the user unrestricted access to
+ the internal state of objects.
+
+
+
The Bayes net itself is now a lightweight struct, and can be
+used to specify a model independently of the inference algorithm used
+to process it.
+In version 1, the inference engine was stored inside the Bayes net.
+
+
+
+
+
+
+
11/24/99
+
+
Added fixed lag smoothing, online EM and the ability to learn
+switching HMMs (POMDPs) to the HMM toolbox.
+
Renamed the HMM toolbox function 'mk_dhmm_obs_mat' to
+'mk_dhmm_obs_lik', and similarly for ghmm and mhmm. Updated references
+to these functions in BNT.
+
Changed the order of return params from kalman_filter to make it
+more natural. Updated references to this function in BNT.
+
+
+
+
+
10/27/99
+
+
Fixed line 42 of potential/cg/marginalize_cgpot and lines 32-39 of bnet/add_evidence_to_marginal
+(thanks to Rainer Deventer for spotting these bugs!)
+
+
+
+
10/21/99
+
+
Completely changed the blockmatrix class to make its semantics
+more sensible. The constructor is not backwards compatible!
+
+
+
10/6/99
+
+
Fixed all_vals = cat(1, vals{:}) in user/enter_evidence
+
Vectorized ind2subv and sub2indv and removed the C versions.
+
Made mk_CPT_from_mux_node much faster by having it call vectorized
+ind2subv
+
Added Sondhauss's bug fix to line 68 of bnet/add_evidence_to_marginal
+
In dbn/update_belief_state, instead of adding eps to likelihood if 0,
+we leave it at 0, and set the scale factor to 0 instead of dividing.
+
+
+
8/19/99
+
+
Added Ghahramani's mfa code to examples directory to compare with
+fa1, which uses BNT
+
Changed all references of assoc to stringmatch (e.g., in
+examples/mk_bat_topology)
+
+
+
June 1999
+
+
Released BNT 1.0 on the web.
+
+
+
+
August 1998
+
+
Released BNT 0.0 via email.
+
+
+
+
October 1997
+
+
First started working on Matlab version of BNT.
+
+
+
Summer 1997
+
+
First started working on C++ version of BNT while working at DEC (now Compaq) CRL.
+
+
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/dbn_hmm_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/dbn_hmm_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+ Example due to Wang Hee Lin: Visualizing graph structures in matlab
+
+We discuss some methods for visualizing graphs/ networks, including automatic
+layout of the nodes.
+We assume the graph is represented as an adjacency matrix.
+If using BNT, you can access the DAG using
+
+G = bnet.dag;
+
+
+
Matlab's biograph function
+
+The Mathworks computational biology toolbox
+has many useful graph related functions, including visualization.
+
+Click
+
+here
+for a demo.
+
+
+
+
Cemgil's draw_graph
+
+You can visualize an arbitrary graph (such as one learned using the
+structure learning routines) with Matlab code written by
+
+Ali Taylan Cemgil
+from the University of Cambridge.
+A modified version of this code
+is here
+(this is already bundled with BNT).
+Just type
+
+
+Pajek
+is an excellent, free Windows program for graph layout.
+Use adj2pajek2.m to convert a graph to the
+Pajek file format.
+
+Then Choose File->Network->Read from the menu.
+
+
AT&T Graphviz
+
+graphviz
+is an
+open-source graph visualization package from AT&T.
+Use
+graph_to_dot
+to convert an adjacency matrix to
+the AT&T file format (the "dot" format).
+You then use dot to convert it to postscript:
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/install.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/install.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+To install, just unzip FullBNT.zip, start Matlab, and proceed as
+follows
+
+>> cd C:\kmurphy\FullBNT\FullBNT-1.0.4
+>> addpath(genpathKPM(pwd))
+
+Warning: Function C:\kmurphy\FullBNT\FullBNT-1.0.4\KPMtools\isvector.m has the same name as a MATLAB builtin. We suggest you rename the function to avoid a potential name conflict.
+> In path at 110
+ In addpath at 89
+Warning: Function C:\kmurphy\FullBNT\FullBNT-1.0.4\KPMtools\isscalar.m has the same name as a MATLAB builtin. We suggest you rename the function to avoid a potential name conflict.
+> In path at 110
+ In addpath at 89
+Warning: Function C:\kmurphy\FullBNT\FullBNT-1.0.4\KPMtools\assert.m has the same name as a MATLAB builtin. We suggest you rename the function to avoid a potential name conflict.
+> In path at 110
+ In addpath at 89
+
+>> test_BNT
+
+The genpathKPM function is like the builtin genpath function, but it
+does not add directories called 'Old' to the path, thus preventing old
+versions of functions accidentally shadowing new ones.
+The warnings occur because Matlab 7 added functions with the same
+names as my functions. The BNT versions will shadow the built-in ones,
+but this should be harmless.
+
+Note: the functions installC_BNT etc. are not needed anymore: all C
+code has either been removed or is unnecessary.
+
Note: as mentioned above, with new versions of matlab you
+will get lots of warning messages, but everything should still work.
+
Note: Cory Reith tells me that, as of
+15 Sep 2009, octave 3.2.2 can run most of BNT.
+Please send email to crieth@ucsd.edu if you have questions.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/join.gif
Binary file toolboxes/FullBNT-1.0.7/docs/join.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/license.gpl
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/license.gpl Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,450 @@
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License version 2 as published by the Free Software Foundation.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+GNU Library General Public License
+
+----------------------------------------------------------------------------
+
+Table of Contents
+
+ * GNU LIBRARY GENERAL PUBLIC LICENSE
+ o Preamble
+ o TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+----------------------------------------------------------------------------
+
+GNU LIBRARY GENERAL PUBLIC LICENSE
+
+Version 2, June 1991
+
+Copyright (C) 1991 Free Software Foundation, Inc.
+675 Mass Ave, Cambridge, MA 02139, USA
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public Licenses are
+intended to guarantee your freedom to share and change free software--to
+make sure the software is free for all its users.
+
+This license, the Library General Public License, applies to some specially
+designated Free Software Foundation software, and to any other libraries
+whose authors decide to use it. You can use it for your libraries, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the library, or if you modify it.
+
+For example, if you distribute copies of the library, whether gratis or for
+a fee, you must give the recipients all the rights that we gave you. You
+must make sure that they, too, receive or can get the source code. If you
+link a program with the library, you must provide complete object files to
+the recipients so that they can relink them with the library, after making
+changes to the library and recompiling it. And you must show them these
+terms so they know their rights.
+
+Our method of protecting your rights has two steps: (1) copyright the
+library, and (2) offer you this license which gives you legal permission to
+copy, distribute and/or modify the library.
+
+Also, for each distributor's protection, we want to make certain that
+everyone understands that there is no warranty for this free library. If the
+library is modified by someone else and passed on, we want its recipients to
+know that what they have is not the original version, so that any problems
+introduced by others will not reflect on the original authors' reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that companies distributing free software will
+individually obtain patent licenses, thus in effect transforming the program
+into proprietary software. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+Most GNU software, including some libraries, is covered by the ordinary GNU
+General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary one;
+be sure to read it in full, and don't assume that anything in it is the same
+as in the ordinary license.
+
+The reason we have a separate public license for some libraries is that they
+blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in a
+textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+Because of this blurred distinction, using the ordinary General Public
+License for libraries did not effectively promote software sharing, because
+most developers did not use the libraries. We concluded that weaker
+conditions might promote sharing better.
+
+However, unrestricted linking of non-free programs would deprive the users
+of those programs of all benefit from the free status of the libraries
+themselves. This Library General Public License is intended to permit
+developers of non-free programs to use free libraries, while preserving your
+freedom as a user of such programs to change the free libraries that are
+incorporated in them. (We have not seen how to achieve this as regards
+changes in header files, but we have achieved it as regards changes in the
+actual functions of the Library.) The hope is that this will lead to faster
+development of free libraries.
+
+The precise terms and conditions for copying, distribution and modification
+follow. Pay close attention to the difference between a "work based on the
+library" and a "work that uses the library". The former contains code
+derived from the library, while the latter only works together with the
+library.
+
+Note that it is possible for a library to be covered by the ordinary General
+Public License rather than by this special one.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License Agreement applies to any software library which contains a
+notice placed by the copyright holder or other authorized party saying it
+may be distributed under the terms of this Library General Public License
+(also called "this License"). Each licensee is addressed as "you".
+
+A "library" means a collection of software functions and/or data prepared so
+as to be conveniently linked with application programs (which use some of
+those functions and data) to form executables.
+
+The "Library", below, refers to any such software library or work which has
+been distributed under these terms. A "work based on the Library" means
+either the Library or any derivative work under copyright law: that is to
+say, a work containing the Library or a portion of it, either verbatim or
+with modifications and/or translated straightforwardly into another
+language. (Hereinafter, translation is included without limitation in the
+term "modification".)
+
+"Source code" for a work means the preferred form of the work for making
+modifications to it. For a library, complete source code means all the
+source code for all modules it contains, plus any associated interface
+definition files, plus the scripts used to control compilation and
+installation of the library.
+
+Activities other than copying, distribution and modification are not covered
+by this License; they are outside its scope. The act of running a program
+using the Library is not restricted, and output from such a program is
+covered only if its contents constitute a work based on the Library
+(independent of the use of the Library in a tool for writing it). Whether
+that is true depends on what the Library does and what the program that uses
+the Library does.
+
+1. You may copy and distribute verbatim copies of the Library's complete
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the notices
+that refer to this License and to the absence of any warranty; and
+distribute a copy of this License along with the Library.
+
+You may charge a fee for the physical act of transferring a copy, and you
+may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Library or any portion of it,
+thus forming a work based on the Library, and copy and distribute such
+modifications or work under the terms of Section 1 above, provided that you
+also meet all of these conditions:
+
+ o a) The modified work must itself be a software library.
+
+ o b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ o c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ o d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If identifiable
+sections of that work are not derived from the Library, and can be
+reasonably considered independent and separate works in themselves, then
+this License, and its terms, do not apply to those sections when you
+distribute them as separate works. But when you distribute the same sections
+as part of a whole which is a work based on the Library, the distribution of
+the whole must be on the terms of this License, whose permissions for other
+licensees extend to the entire whole, and thus to each and every part
+regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest your
+rights to work written entirely by you; rather, the intent is to exercise
+the right to control the distribution of derivative or collective works
+based on the Library.
+
+In addition, mere aggregation of another work not based on the Library with
+the Library (or with a work based on the Library) on a volume of a storage
+or distribution medium does not bring the other work under the scope of this
+License.
+
+3. You may opt to apply the terms of the ordinary GNU General Public License
+instead of this License to a given copy of the Library. To do this, you must
+alter all the notices that refer to this License, so that they refer to the
+ordinary GNU General Public License, version 2, instead of to this License.
+(If a newer version than version 2 of the ordinary GNU General Public
+License has appeared, then you can specify that version instead if you
+wish.) Do not make any other change in these notices.
+
+Once this change is made in a given copy, it is irreversible for that copy,
+so the ordinary GNU General Public License applies to all subsequent copies
+and derivative works made from that copy.
+
+This option is useful when you wish to copy part of the code of the Library
+into a program that is not a library.
+
+4. You may copy and distribute the Library (or a portion or derivative of
+it, under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you accompany it with the complete
+corresponding machine-readable source code, which must be distributed under
+the terms of Sections 1 and 2 above on a medium customarily used for
+software interchange.
+
+If distribution of object code is made by offering access to copy from a
+designated place, then offering equivalent access to copy the source code
+from the same place satisfies the requirement to distribute the source code,
+even though third parties are not compelled to copy the source along with
+the object code.
+
+5. A program that contains no derivative of any portion of the Library, but
+is designed to work with the Library by being compiled or linked with it, is
+called a "work that uses the Library". Such a work, in isolation, is not a
+derivative work of the Library, and therefore falls outside the scope of
+this License.
+
+However, linking a "work that uses the Library" with the Library creates an
+executable that is a derivative of the Library (because it contains portions
+of the Library), rather than a "work that uses the library". The executable
+is therefore covered by this License. Section 6 states terms for
+distribution of such executables.
+
+When a "work that uses the Library" uses material from a header file that is
+part of the Library, the object code for the work may be a derivative work
+of the Library even though the source code is not. Whether this is true is
+especially significant if the work can be linked without the Library, or if
+the work is itself a library. The threshold for this to be true is not
+precisely defined by law.
+
+If such an object file uses only numerical parameters, data structure
+layouts and accessors, and small macros and small inline functions (ten
+lines or less in length), then the use of the object file is unrestricted,
+regardless of whether it is legally a derivative work. (Executables
+containing this object code plus portions of the Library will still fall
+under Section 6.)
+
+Otherwise, if the work is a derivative of the Library, you may distribute
+the object code for the work under the terms of Section 6. Any executables
+containing that work also fall under Section 6, whether or not they are
+linked directly with the Library itself.
+
+6. As an exception to the Sections above, you may also compile or link a
+"work that uses the Library" with the Library to produce a work containing
+portions of the Library, and distribute that work under terms of your
+choice, provided that the terms permit modification of the work for the
+customer's own use and reverse engineering for debugging such modifications.
+
+You must give prominent notice with each copy of the work that the Library
+is used in it and that the Library and its use are covered by this License.
+You must supply a copy of this License. If the work during execution
+displays copyright notices, you must include the copyright notice for the
+Library among them, as well as a reference directing the user to the copy of
+this License. Also, you must do one of these things:
+
+ o a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood that
+ the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ o b) Accompany the work with a written offer, valid for at least
+ three years, to give the same user the materials specified in
+ Subsection 6a, above, for a charge no more than the cost of
+ performing this distribution.
+
+ o c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ o d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+For an executable, the required form of the "work that uses the Library"
+must include any data and utility programs needed for reproducing the
+executable from it. However, as a special exception, the source code
+distributed need not include anything that is normally distributed (in
+either source or binary form) with the major components (compiler, kernel,
+and so on) of the operating system on which the executable runs, unless that
+component itself accompanies the executable.
+
+It may happen that this requirement contradicts the license restrictions of
+other proprietary libraries that do not normally accompany the operating
+system. Such a contradiction means you cannot use both them and the Library
+together in an executable that you distribute.
+
+7. You may place library facilities that are a work based on the Library
+side-by-side in a single library together with other library facilities not
+covered by this License, and distribute such a combined library, provided
+that the separate distribution of the work based on the Library and of the
+other library facilities is otherwise permitted, and provided that you do
+these two things:
+
+ o a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ o b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+8. You may not copy, modify, sublicense, link with, or distribute the
+Library except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense, link with, or distribute the Library
+is void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under this
+License will not have their licenses terminated so long as such parties
+remain in full compliance.
+
+9. You are not required to accept this License, since you have not signed
+it. However, nothing else grants you permission to modify or distribute the
+Library or its derivative works. These actions are prohibited by law if you
+do not accept this License. Therefore, by modifying or distributing the
+Library (or any work based on the Library), you indicate your acceptance of
+this License to do so, and all its terms and conditions for copying,
+distributing or modifying the Library or works based on it.
+
+10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the original
+licensor to copy, distribute, link with or modify the Library subject to
+these terms and conditions. You may not impose any further restrictions on
+the recipients' exercise of the rights granted herein. You are not
+responsible for enforcing compliance by third parties to this License.
+
+11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot distribute so
+as to satisfy simultaneously your obligations under this License and any
+other pertinent obligations, then as a consequence you may not distribute
+the Library at all. For example, if a patent license would not permit
+royalty-free redistribution of the Library by all those who receive copies
+directly or indirectly through you, then the only way you could satisfy both
+it and this License would be to refrain entirely from distribution of the
+Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any patents
+or other property right claims or to contest validity of any such claims;
+this section has the sole purpose of protecting the integrity of the free
+software distribution system which is implemented by public license
+practices. Many people have made generous contributions to the wide range of
+software distributed through that system in reliance on consistent
+application of that system; it is up to the author/donor to decide if he or
+she is willing to distribute software through any other system and a
+licensee cannot impose that choice.
+
+This section is intended to make thoroughly clear what is believed to be a
+consequence of the rest of this License.
+
+12. If the distribution and/or use of the Library is restricted in certain
+countries either by patents or by copyrighted interfaces, the original
+copyright holder who places the Library under this License may add an
+explicit geographical distribution limitation excluding those countries, so
+that distribution is permitted only in or among countries not thus excluded.
+In such case, this License incorporates the limitation as if written in the
+body of this License.
+
+13. The Free Software Foundation may publish revised and/or new versions of
+the Library General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Library does not specify a license version
+number, you may choose any version ever published by the Free Software
+Foundation.
+
+14. If you wish to incorporate parts of the Library into other free programs
+whose distribution conditions are incompatible with these, write to the
+author to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals of
+preserving the free status of all derivatives of our free software and of
+promoting the sharing and reuse of software generally.
+
+NO WARRANTY
+
+15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
+THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO
+THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY
+PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
+CORRECTION.
+
+16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO
+LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR
+THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER
+SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License version 2 as published by the Free Software Foundation.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/majorFeatures.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/majorFeatures.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,113 @@
+
+
BNT supports many types of
+conditional probability distributions (nodes),
+and it is easy to add more.
+
+
Tabular (multinomial)
+
Gaussian
+
Softmax (logistic/ sigmoid)
+
Multi-layer perceptron (neural network)
+
Noisy-or
+
Deterministic
+
+
+
+
BNT supports decision and utility nodes, as well as chance
+nodes,
+i.e., influence diagrams as well as Bayes nets.
+
+
+
BNT supports static and dynamic BNs (useful for modelling dynamical systems
+and sequence data).
+
+
+
BNT supports many different inference algorithms,
+and it is easy to add more.
+
+
+
Exact inference for static BNs:
+
+
junction tree
+
variable elimination
+
brute force enumeration (for discrete nets)
+
linear algebra (for Gaussian nets)
+
Pearl's algorithm (for polytrees)
+
quickscore (for QMR)
+
+
+
+
Approximate inference for static BNs:
+
+
likelihood weighting
+
Gibbs sampling
+
loopy belief propagation
+
+
+
+
Exact inference for DBNs:
+
+
junction tree
+
frontier algorithm
+
forwards-backwards (for HMMs)
+
Kalman-RTS (for LDSs)
+
+
+
+
Approximate inference for DBNs:
+
+
Boyen-Koller
+
factored-frontier/loopy belief propagation
+
+
+
+
+
+
+BNT supports several methods for parameter learning,
+and it is easy to add more.
+
+
+
Batch MLE/MAP parameter learning using EM.
+(Each node type has its own M method, e.g. softmax nodes use IRLS,
+and each inference engine has its own E method, so the code is fully modular.)
+
+
+BNT supports several methods for regularization,
+and it is easy to add more.
+
+
Any node can have its parameters clamped (made non-adjustable).
+
Any set of compatible nodes can have their parameters tied (c.f.,
+weight sharing in a neural net).
+
+
Some node types (e.g., tabular) support priors for MAP estimation.
+
Gaussian covariance matrices can be declared full or diagonal, and can
+be tied across states of their discrete parents (if any).
+
+
+
+
+BNT supports several methods for structure learning,
+and it is easy to add more.
+
+
+
Bayesian structure learning,
+using MCMC or local search (for fully observed tabular nodes only).
+
+
Constraint-based structure learning (IC/PC and IC*/FCI).
+
+
+
+
+
The source code is extensively documented, object-oriented, and free, making it
+an excellent tool for teaching, research and rapid prototyping.
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/mathbymatlab.gif
Binary file toolboxes/FullBNT-1.0.7/docs/mathbymatlab.gif has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/matlab_comparison.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/matlab_comparison.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+
+Comparison of Matlab, R/S/Splus, Gauss, etc.
+
+
+
+
+
+
Comparison of Matlab, R/S/Splus, Gauss, etc.
+
+
+
Comparison of
+mathematical programs for data analysis,
+Stefan Steinhaus, tech report, 2000.
+
+This is a very detailed comparison of features and speed of several
+interactive scientific programming environments, e.g. Matlab,
+Mathematica, Splus.
+
+
+
+
+
+
+
+We now give a more complex pattern of parameter tying.
+(This example is due to Rainer Deventer.)
+The structure is as follows:
+
+
+
+
+Since nodes 2 and 3 in slice 2 (N7 and N8)
+have different parents than their counterparts in slice 1 (N2 and N3),
+they must be put into different equivalence classes.
+Hence we define
+
+eclass1 = [1 2 3 4 5];
+eclass2 = [1 6 7 4 5];
+
+The dotted bubbles represent the equivalence classes.
+Node 7 is the representative node for equivalence class
+6, and node 8 is the rep. for class 7, so we need to write
+
Many other combinations, for which there are (as yet) no names!
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/usage.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/usage.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3072 @@
+
+How to use the Bayes Net Toolbox
+
+
+
+
+
+
How to use the Bayes Net Toolbox
+
+This documentation was last updated on 29 October 2007.
+
+Click
+here
+for a French version of this documentation (last updated in 2005).
+
+
+To define a Bayes net, you must specify the graph structure and then
+the parameters. We look at each in turn, using a simple example
+(adapted from Russell and
+Norvig, "Artificial Intelligence: a Modern Approach", Prentice Hall,
+1995, p454).
+
+
+
Graph structure
+
+
+Consider the following network.
+
+
+
+
+
+
+
+
+To specify this directed acyclic graph (dag), we create an adjacency matrix:
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+
+We have numbered the nodes as follows:
+Cloudy = 1, Sprinkler = 2, Rain = 3, WetGrass = 4.
+The nodes must always be numbered in topological order, i.e.,
+ancestors before descendants.
+For a more complicated graph, this is a little inconvenient: we will
+see how to get around this below.
+
+In Matlab 6, you can use logical arrays instead of double arrays,
+which are 4 times smaller:
+
+dag = false(N,N);
+dag(C,[R S]) = true;
+...
+
+However, some graph functions (eg acyclic) do not work on
+logical arrays!
+
+You can visualize the resulting graph structure using
+the methods discussed below.
+For details on GUIs,
+click here.
+
+
Creating the Bayes net shell
+
+In addition to specifying the graph structure,
+we must specify the size and type of each node.
+If a node is discrete, its size is the
+number of possible values
+each node can take on; if a node is continuous,
+it can be a vector, and its size is the length of this vector.
+In this case, we will assume all nodes are discrete and binary.
+
+If the nodes were not binary, you could type e.g.,
+
+node_sizes = [4 2 3 5];
+
+meaning that Cloudy has 4 possible values,
+Sprinkler has 2 possible values, etc.
+Note that these are cardinal values, not ordinal, i.e.,
+they are not ordered in any way, like 'low', 'medium', 'high'.
+
+Note that optional arguments are specified using a name/value syntax.
+This is common for many BNT functions.
+In general, to find out more about a function (e.g., which optional
+arguments it takes), please see its
+documentation string by typing
+
+
+A model consists of the graph structure and the parameters.
+The parameters are represented by CPD objects (CPD = Conditional
+Probability Distribution), which define the probability distribution
+of a node given its parents.
+(We will use the terms "node" and "random variable" interchangeably.)
+The simplest kind of CPD is a table (multi-dimensional array), which
+is suitable when all the nodes are discrete-valued. Note that the discrete
+values are not assumed to be ordered in any way; that is, they
+represent categorical quantities, like male and female, rather than
+ordinal quantities, like low, medium and high.
+(We will discuss CPDs in more detail below.)
+
+Tabular CPDs, also called CPTs (conditional probability tables),
+are stored as multidimensional arrays, where the dimensions
+are arranged in the same order as the nodes, e.g., the CPT for node 4
+(WetGrass) is indexed by Sprinkler (2), Rain (3) and then WetGrass (4) itself.
+Hence the child is always the last dimension.
+If a node has no parents, its CPT is a column vector representing its
+prior.
+Note that in Matlab (unlike C), arrays are indexed
+from 1, and are laid out in memory such that the first index toggles
+fastest, e.g., the CPT for node 4 (WetGrass) is as follows
+
+
+
+where we have used the convention that false==1, true==2.
+We can create this CPT in Matlab as follows
+
+
+If we do not specify the CPT, random parameters will be
+created, i.e., each "row" of the CPT will be drawn from the uniform distribution.
+To ensure repeatable results, use
+
+rand('state', seed);
+randn('state', seed);
+
+To control the degree of randomness (entropy),
+you can sample each row of the CPT from a Dirichlet(p,p,...) distribution.
+If p << 1, this encourages "deterministic" CPTs (one entry near 1, the rest near 0).
+If p = 1, each entry is drawn from U[0,1].
+If p >> 1, the entries will all be near 1/k, where k is the arity of
+this node, i.e., each row will be nearly uniform.
+You can do this as follows, assuming this node
+is number i, and ns is the node_sizes.
+
+It is currently not possible to save/load a BNT matlab object to
+file, but this is easily fixed if you modify all the constructors
+for all the classes (see matlab documentation).
+
+
+
+Having created the BN, we can now use it for inference.
+There are many different algorithms for doing inference in Bayes nets,
+that make different tradeoffs between speed,
+complexity, generality, and accuracy.
+BNT therefore offers a variety of different inference
+"engines". We will discuss these
+in more detail below.
+For now, we will use the junction tree
+engine, which is the mother of all exact inference algorithms.
+This can be created as follows.
+
+engine = jtree_inf_engine(bnet);
+
+The other engines have similar constructors, but might take
+additional, algorithm-specific parameters.
+All engines are used in the same way, once they have been created.
+We illustrate this in the following sections.
+
+
+
+
+Suppose we want to compute the probability that the sprinkler was on
+given that the grass is wet.
+The evidence consists of the fact that W=2. All the other nodes
+are hidden (unobserved). We can specify this as follows.
+
+evidence = cell(1,N);
+evidence{W} = 2;
+
+We use a 1D cell array instead of a vector to
+cope with the fact that nodes can be vectors of different lengths.
+In addition, the value [] can be used
+to denote 'no evidence', instead of having to specify the observation
+pattern as a separate argument.
+(Click here for a quick tutorial on cell
+arrays in matlab.)
+
+We are now ready to add the evidence to the engine.
+
+The behavior of this function is algorithm-specific, and is discussed
+in more detail below.
+In the case of the jtree engine,
+enter_evidence implements a two-pass message-passing scheme.
+The first return argument contains the modified engine, which
+incorporates the evidence. The second return argument contains the
+log-likelihood of the evidence. (Not all engines are capable of
+computing the log-likelihood.)
+
+Finally, we can compute p=P(S=2|W=2) as follows.
+
+
+What happens if we ask for the marginal on an observed node, e.g. P(W|W=2)?
+An observed discrete node effectively only has 1 value (the observed
+ one) --- all other values would result in 0 probability.
+For efficiency, BNT treats observed (discrete) nodes as if they were
+ set to 1, as we see below:
+
+m is a structure. The 'T' field is a multi-dimensional array (in
+this case, 3-dimensional) that contains the joint probability
+distribution on the specified nodes.
+
+The joint T(i,j,k) = P(S=i,R=j,W=k|evidence)
+should have T(i,1,k) = 0 for all i,k, since R=1 is incompatible
+with the evidence that R=2.
+Instead of creating large tables with many 0s, BNT sets the effective
+size of observed (discrete) nodes to 1, as explained above.
+This is why m.T has size 2x1x2.
+To get a 2x2x2 table, type
+
+Note: It is not always possible to compute the joint on arbitrary
+sets of nodes: it depends on which inference engine you use, as discussed
+in more detail below.
+
+
+
+
+Sometimes a node is not observed, but we have some distribution over
+its possible values; this is often called "soft" or "virtual"
+evidence.
+One can use this as follows
+
+where soft_evidence{i} is either [] (if node i has no soft evidence)
+or is a vector representing the probability distribution over i's
+possible values.
+For example, if we don't know i's exact value, but we know its
+likelihood ratio is 60/40, we can write evidence{i} = [] and
+soft_evidence{i} = [0.6 0.4].
+
+Currently only jtree_inf_engine supports this option.
+It assumes that all hidden nodes, and all nodes for
+which we have soft evidence, are discrete.
+For a longer example, see BNT/examples/static/softev1.m.
+
+
+
+
+To compute the most probable explanation (MPE) of the evidence (i.e.,
+the most probable assignment, or a mode of the joint), use
+
+[mpe, ll] = calc_mpe(engine, evidence);
+
+mpe{i} is the most likely value of node i.
+This calls enter_evidence with the 'maximize' flag set to 1, which
+causes the engine to do max-product instead of sum-product.
+The resulting max-marginals are then thresholded.
+If there is more than one maximum probability assignment, we must take
+ care to break ties in a consistent manner (thresholding the
+ max-marginals may give the wrong result). To force this behavior,
+ type
+
+[mpe, ll] = calc_mpe(engine, evidence, 1);
+
+Note that computing the MPE is sometimes called abductive reasoning.
+
+
+You can also use calc_mpe_bucket written by Ron Zohar,
+that does a forwards max-product pass, and then a backwards traceback
+pass, which is how Viterbi is traditionally implemented.
+
+
+
+
+
+A Conditional Probability Distributions (CPD)
+defines P(X(i) | X(Pa(i))), where X(i) is the i'th node, and X(Pa(i))
+are the parents of node i. There are many ways to represent this
+distribution, which depend in part on whether X(i) and X(Pa(i)) are
+discrete, continuous, or a combination.
+We will discuss various representations below.
+
+
+
+
+If the CPD is represented as a table (i.e., if it is a multinomial
+distribution), it has a number of parameters that is exponential in
+the number of parents. See the example above.
+
+
+
+
+A noisy-OR node is like a regular logical OR gate except that
+sometimes the effects of parents that are on get inhibited.
+Let the prob. that parent i gets inhibited be q(i).
+Then a node, C, with 2 parents, A and B, has the following CPD, where
+we use F and T to represent off and on (1 and 2 in BNT).
+
+A B P(C=off) P(C=on)
+---------------------------
+F F 1.0 0.0
+T F q(A) 1-q(A)
+F T q(B) 1-q(B)
+T T q(A)q(B) 1-q(A)q(B)
+
+Thus we see that the causes get inhibited independently.
+It is common to associate a "leak" node with a noisy-or CPD, which is
+like a parent that is always on. This can account for all other unmodelled
+causes which might turn the node on.
+
+The noisy-or distribution is similar to the logistic distribution.
+To see this, let the nodes, S(i), have values in {0,1}, and let q(i,j)
+be the prob. that j inhibits i. Then
+
+where sigma(x) = 1/(1+exp(-x)). Hence they differ in the choice of
+the activation function (although both are monotonically increasing).
+In addition, in the case of a noisy-or, the weights are constrained to be
+positive, since they derive from probabilities q(i,j).
+In both cases, the number of parameters is linear in the
+number of parents, unlike the case of a multinomial distribution,
+where the number of parameters is exponential in the number of parents.
+We will see an example of noisy-OR nodes below.
+
+
+
+
+Deterministic CPDs for discrete random variables can be created using
+the deterministic_CPD class. It is also possible to 'flip' the output
+of the function with some probability, to simulate noise.
+The boolean_CPD class is just a special case of a
+deterministic CPD, where the parents and child are all binary.
+
+Both of these classes are just "syntactic sugar" for the tabular_CPD
+class.
+
+
+
+
+
+If we have a discrete node with a continuous parent,
+we can define its CPD using a softmax function
+(also known as the multinomial logit function).
+This acts like a soft thresholding operator, and is defined as follows:
+
+The parameters of a softmax node, w(:,i) and b(i), i=1..|Q|, have the
+following interpretation: w(:,i)-w(:,j) is the normal vector to the
+decision boundary between classes i and j,
+and b(i)-b(j) is its offset (bias). For example, suppose
+X is a 2-vector, and Q is binary. Then
+
+w = [1 -1;
+ 0 0];
+
+b = [0 0];
+
+means class 1 are points in the 2D plane with positive x coordinate,
+and class 2 are points in the 2D plane with negative x coordinate.
+If w has large magnitude, the decision boundary is sharp, otherwise it
+is soft.
+In the special case that Q is binary (0/1), the softmax function reduces to the logistic
+(sigmoid) function.
+
+Fitting a softmax function can be done using the iteratively reweighted
+least squares (IRLS) algorithm.
+We use the implementation from
+Netlab.
+Note that since
+the softmax distribution is not in the exponential family, it does not
+have finite sufficient statistics, and hence we must store all the
+training data in uncompressed form.
+If this takes too much space, one should use online (stochastic) gradient
+descent (not implemented in BNT).
+
+If a softmax node also has discrete parents,
+we use a different set of w/b parameters for each combination of
+parent values, as in the conditional linear
+Gaussian CPD.
+This feature was implemented by Pierpaolo Brutti.
+He is currently extending it so that discrete parents can be treated
+as if they were continuous, by adding indicator variables to the X
+vector.
+
+We will see an example of softmax nodes below.
+
+
+
+
+Pierpaolo Brutti has implemented the mlp_CPD class, which uses a multi layer perceptron
+to implement a mapping from continuous parents to discrete children,
+similar to the softmax function.
+(If there are also discrete parents, it creates a mixture of MLPs.)
+It uses code from Netlab.
+This is work in progress.
+
+
+
+A root node has no parents and no parameters; it can be used to model
+an observed, exogenous input variable, i.e., one which is "outside"
+the model.
+This is useful for conditional density models.
+We will see an example of root nodes below.
+
+
+
+
+We now consider a distribution suitable for the continuous-valued nodes.
+Suppose the node is called Y, its continuous parents (if any) are
+called X, and its discrete parents (if any) are called Q.
+The distribution on Y is defined as follows:
+
+- no parents: Y ~ N(mu, Sigma)
+- cts parents : Y|X=x ~ N(mu + W x, Sigma)
+- discrete parents: Y|Q=i ~ N(mu(:,i), Sigma(:,:,i))
+- cts and discrete parents: Y|X=x,Q=i ~ N(mu(:,i) + W(:,:,i) * x, Sigma(:,:,i))
+
+where N(mu, Sigma) denotes a Normal distribution with mean mu and
+covariance Sigma. Let |X|, |Y| and |Q| denote the sizes of X, Y and Q
+respectively.
+If there are no discrete parents, |Q|=1; if there is
+more than one, then |Q| = a vector of the sizes of each discrete parent.
+If there are no continuous parents, |X|=0; if there is more than one,
+then |X| = the sum of their sizes.
+Then mu is a |Y|*|Q| vector, Sigma is a |Y|*|Y|*|Q| positive
+semi-definite matrix, and W is a |Y|*|X|*|Q| regression (weight)
+matrix.
+
+We can create a Gaussian node with random parameters as follows.
+
+bnet.CPD{i} = gaussian_CPD(bnet, i);
+
+We can specify the value of one or more of the parameters as in the
+following example, in which |Y|=2, and |Q|=1.
+
+We will see an example of conditional linear Gaussian nodes below.
+
+When learning Gaussians from data, it is helpful to ensure the
+data has a small magnitude
+(see e.g., KPMstats/standardize) to prevent numerical problems.
+Unless you have a lot of data, it is also a very good idea to use
+diagonal instead of full covariance matrices.
+(BNT does not currently support spherical covariances, although it
+would be easy to add, since KPMstats/clg_Mstep supports this option;
+you would just need to modify gaussian_CPD/update_ess to accumulate
+weighted inner products.)
+
+
+
+
+
+Currently BNT does not support any CPDs for continuous nodes other
+than the Gaussian.
+However, you can use a mixture of Gaussians to
+approximate other continuous distributions. We will see an example
+of this with the IFA model below.
+
+
+
+
+We plan to add classification and regression trees to define CPDs for
+discrete and continuous nodes, respectively.
+Trees have many advantages: they are easy to interpret, they can do
+feature selection, they can
+handle discrete and continuous inputs, they do not make strong
+assumptions about the form of the distribution, the number of
+parameters can grow in a data-dependent way (i.e., they are
+semi-parametric), they can handle missing data, etc.
+However, they are not yet implemented.
+
+
+
+
+
+We list all the different types of CPDs supported by BNT.
+For each CPD, we specify if the child and parents can be discrete (D) or
+continuous (C) (Binary (B) nodes are a special case).
+We also specify which methods each class supports.
+If a method is inherited, the name of the parent class is mentioned.
+If a parent class calls a child method, this is mentioned.
+
+The CPD_to_CPT method converts a CPD to a table; this
+requires that the child and all parents are discrete.
+The CPT might be exponentially big...
+convert_to_table evaluates a CPD with evidence, and
+represents the resulting potential as an array.
+This requires that the child is discrete, and any continuous parents
+are observed.
+convert_to_pot evaluates a CPD with evidence, and
+represents the resulting potential as a dpot, gpot, cgpot or upot, as
+requested. (d=discrete, g=Gaussian, cg = conditional Gaussian, u =
+utility).
+
+
+When we sample a node, all the parents are observed.
+When we compute the (log) probability of a node, all the parents and
+the child are observed.
+
+We also specify if the parameters are learnable.
+For learning with EM, we require
+the methods reset_ess, update_ess and
+maximize_params.
+For learning from fully observed data, we require
+the method learn_params.
+By default, all classes inherit this from generic_CPD, which simply
+calls update_ess N times, once for each data case, followed
+by maximize_params, i.e., it is like EM, without the E step.
+Some classes implement a batch formula, which is quicker.
+
+Bayesian learning means computing a posterior over the parameters
+given fully observed data.
+
+Pearl means we implement the methods compute_pi and
+compute_lambda_msg, used by
+pearl_inf_engine, which runs on directed graphs.
+belprop_inf_engine only needs convert_to_pot.
+The pearl methods can exploit special properties of the CPDs for
+computing the messages efficiently, whereas belprop does not.
+
+The only method implemented by generic_CPD is adjustable_CPD,
+which is not shown, since it is not very interesting.
+
+
+
+
+In Figure (a) below, we show how Factor Analysis can be thought of as a
+graphical model. Here, X has an N(0,I) prior, and
+Y|X=x ~ N(mu + Wx, Psi),
+where Psi is diagonal and W is called the "factor loading matrix".
+Since the noise on both X and Y is diagonal, the components of these
+vectors are uncorrelated, and hence can be represented as individual
+scalar nodes, as we show in (b).
+(This is useful if parts of the observations on the Y vector are occasionally missing.)
+We usually take k=|X| << |Y|=D, so the model tries to explain
+many observations using a low-dimensional subspace.
+
+
+
+
+The root node is clamped to the N(0,I) distribution, so that we will
+not update these parameters during learning.
+The mean of the leaf node is clamped to 0,
+since we assume the data has been centered (had its mean subtracted
+off); this is just for simplicity.
+Finally, the covariance of the leaf node is constrained to be
+diagonal. W0 and Psi0 are the initial parameter guesses.
+
+
+We can fit this model (i.e., estimate its parameters in a maximum
+likelihood (ML) sense) using EM, as we
+explain below.
+Not surprisingly, the ML estimates for mu and Psi turn out to be
+identical to the
+sample mean and variance, which can be computed directly as
+
+mu_ML = mean(data);
+Psi_ML = diag(cov(data));
+
+Note that W can only be identified up to a rotation matrix, because of
+the spherical symmetry of the source.
+
+
+If we restrict Psi to be spherical, i.e., Psi = sigma*I,
+there is a closed-form solution for W as well,
+i.e., we do not need to use EM.
+In particular, W contains the first |X| eigenvectors of the sample covariance
+matrix, with scalings determined by the eigenvalues and sigma.
+Classical PCA can be obtained by taking the sigma->0 limit.
+For details, see
+
+
+By adding a hidden discrete variable, we can create mixtures of FA
+models, as shown in (c).
+Now we can explain the data using a set of subspaces.
+We can create this model in BNT as follows.
+
+Notice how the covariance matrix for Y is the same for all values of
+Q; that is, the noise level in each sub-space is assumed the same.
+However, we allow the offset, mu, to vary.
+For details, see
+
+I have included Zoubin's specialized MFA code (with his permission)
+with the toolbox, so you can check that BNT gives the same results:
+see 'BNT/examples/static/mfa1.m'.
+
+
+Independent Factor Analysis (IFA) generalizes FA by allowing a
+non-Gaussian prior on each component of X.
+(Note that we can approximate a non-Gaussian prior using a mixture of
+Gaussians.)
+This means that the likelihood function is no longer rotationally
+invariant, so we can uniquely identify W and the hidden
+sources X.
+IFA also allows a non-diagonal Psi (i.e. correlations between the components of Y).
+We recover classical Independent Components Analysis (ICA)
+in the Psi -> 0 limit, and by assuming that |X|=|Y|, so that the
+weight matrix W is square and invertible.
+For details, see
+
+
+As an example of the use of the softmax function,
+we introduce the Mixture of Experts model.
+
+As before,
+circles denote continuous-valued nodes,
+squares denote discrete nodes, clear
+means hidden, and shaded means observed.
+
+
+
+
+
+
+
+
+
+X is the observed
+input, Y is the output, and
+the Q nodes are hidden "gating" nodes, which select the appropriate
+set of parameters for Y. During training, Y is assumed observed,
+but for testing, the goal is to predict Y given X.
+Note that this is a conditional density model, so we don't
+associate any parameters with X.
+Hence X's CPD will be a root CPD, which is a way of modelling
+exogenous nodes.
+If the output is a continuous-valued quantity,
+we assume the "experts" are linear-regression units,
+and set Y's CPD to linear-Gaussian.
+If the output is discrete, we set Y's CPD to a softmax function.
+The Q CPDs will always be softmax functions.
+
+
+As a concrete example, consider the mixture of experts model where X and Y are
+scalars, and Q is binary.
+This is just piecewise linear regression, where
+we have two line segments, i.e.,
+
+
+
+We can create this model with random parameters as follows.
+(This code is bundled in BNT/examples/static/mixexp2.m.)
+
+This is what the model looks like before training.
+(Thanks to Thomas Hofman for writing this plotting routine.)
+
+
+
+
+
+Now let's train the model, and plot the final performance.
+(We will discuss how to train models in more detail below.)
+
+
+ncases = size(data, 1); % each row of data is a training case
+cases = cell(3, ncases);
+cases([1 3], :) = num2cell(data'); % each column of cases is a training case
+engine = jtree_inf_engine(bnet);
+max_iter = 20;
+[bnet2, LLtrace] = learn_params_em(engine, cases, max_iter);
+
+(We specify which nodes will be observed when we create the engine.
+Hence BNT knows that the hidden nodes are all discrete.
+For complex models, this can lead to a significant speedup.)
+Below we show what the model looks like after 16 iterations of EM
+(with 100 IRLS iterations per M step), when it converged
+using the default convergence tolerance (that the
+fractional change in the log-likelihood be less than 1e-3).
+Before learning, the log-likelihood was
+-322.927442; afterwards, it was -13.728778.
+
+
+
+
+(See BNT/examples/static/mixexp2.m for details of the code.)
+
+
+
+
+
+A hierarchical mixture of experts (HME) extends the mixture of experts
+model by having more than one hidden node. A two-level example is shown below, along
+with its more traditional representation as a neural network.
+This is like a (balanced) probabilistic decision tree of height 2.
+
+
+
+
+
+Pierpaolo Brutti
+has written an extensive set of routines for HMEs,
+which are bundled with BNT: see the examples/static/HME directory.
+These routines allow you to choose the number of hidden (gating)
+layers, and the form of the experts (softmax or MLP).
+See the file hmemenu, which provides a demo.
+For example, the figure below shows the decision boundaries learned
+for a ternary classification problem, using a 2 level HME with softmax
+gates and softmax experts; the training set is on the left, the
+testing set on the right.
+
"Generalized Linear Models", McCullagh and Nelder, Chapman and
+Hall, 1983.
+
+
+"Improved learning algorithms for mixtures of experts in multiclass
+classification".
+K. Chen, L. Xu, H. Chi.
+Neural Networks (1999) 12: 1229-1252.
+
+
+
+Bayes nets originally arose out of an attempt to add probabilities to
+expert systems, and this is still the most common use for BNs.
+A famous example is
+QMR-DT, a decision-theoretic reformulation of the Quick Medical
+Reference (QMR) model.
+
+
+
+
+Here, the top layer represents hidden disease nodes, and the bottom
+layer represents observed symptom nodes.
+The goal is to infer the posterior probability of each disease given
+all the symptoms (which can be present, absent or unknown).
+Each node in the top layer has a Bernoulli prior (with a low prior
+probability that the disease is present).
+Since each node in the bottom layer has a high fan-in, we use a
+noisy-OR parameterization; each disease has an independent chance of
+causing each symptom.
+The real QMR-DT model is copyright, but
+we can create a random QMR-like model as follows.
+
+function bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+% MK_QMR_BNET Make a QMR model
+% bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+%
+% G(i,j) = 1 iff there is an arc from disease i to finding j
+% inhibit(i,j) = inhibition probability on i->j arc
+% leak(j) = inhibition prob. on leak->j arc
+% prior(i) = prob. disease i is on
+
+[Ndiseases Nfindings] = size(inhibit);
+N = Ndiseases + Nfindings;
+finding_node = Ndiseases+1:N;
+ns = 2*ones(1,N);
+dag = zeros(N,N);
+dag(1:Ndiseases, finding_node) = G;
+bnet = mk_bnet(dag, ns, 'observed', finding_node);
+
+for d=1:Ndiseases
+ CPT = [1-prior(d) prior(d)];
+ bnet.CPD{d} = tabular_CPD(bnet, d, CPT');
+end
+
+for i=1:Nfindings
+ fnode = finding_node(i);
+ ps = parents(G, i);
+ bnet.CPD{fnode} = noisyor_CPD(bnet, fnode, leak(i), inhibit(ps, i));
+end
+
+In the file BNT/examples/static/qmr1, we create a random bipartite
+graph G, with 5 diseases and 10 findings, and random parameters.
+(In general, to create a random dag, use 'mk_random_dag'.)
+We can visualize the resulting graph structure using
+the methods discussed below, with the
+following results:
+
+
+
+
+Now let us put some random evidence on all the leaves except the very
+first and very last, and compute the disease posteriors.
+
+Junction tree can be quite slow on large QMR models.
+Fortunately, it is possible to exploit properties of the noisy-OR
+function to speed up exact inference using an algorithm called
+quickscore, discussed below.
+
+
+
+
+
+
+
+A conditional Gaussian model is one in which, conditioned on all the discrete
+nodes, the distribution over the remaining (continuous) nodes is
+multivariate Gaussian. This means we can have arcs from discrete (D)
+to continuous (C) nodes, but not vice versa.
+(We are allowed C->D arcs if the continuous nodes are observed,
+as in the mixture of experts model,
+since this distribution can be represented with a discrete potential.)
+
+We now give an example of a CG model, from
+the paper "Propagation of Probabilities, Means and
+Variances in Mixed Graphical Association Models", Steffen Lauritzen,
+JASA 87(420):1098--1108, 1992 (reprinted in the book "Probabilistic Networks and Expert
+Systems", R. G. Cowell, A. P. Dawid, S. L. Lauritzen and
+D. J. Spiegelhalter, Springer, 1999.)
+
+
Specifying the graph
+
+Consider the model of waste emissions from an incinerator plant shown below.
+We follow the standard convention that shaded nodes are observed,
+clear nodes are hidden.
+We also use the non-standard convention that
+square nodes are discrete (tabular) and round nodes are
+Gaussian.
+
+
+
+
+
+
+
+We can create this model as follows.
+
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+
+dag = zeros(n);
+dag(F,E)=1;
+dag(W,[E Min D]) = 1;
+dag(E,D)=1;
+dag(B,[C D])=1;
+dag(D,[L Mout])=1;
+dag(Min,Mout)=1;
+
+% node sizes - all cts nodes are scalar, all discrete nodes are binary
+ns = ones(1, n);
+dnodes = [F W B];
+cnodes = mysetdiff(1:n, dnodes);
+ns(dnodes) = 2;
+
+bnet = mk_bnet(dag, ns, 'discrete', dnodes);
+
+'dnodes' is a list of the discrete nodes; 'cnodes' is the continuous
+nodes. 'mysetdiff' is a faster version of the built-in 'setdiff'.
+
+
+
+
Specifying the parameters
+
+The parameters of the discrete nodes can be specified as follows.
+
+
+'marg' is a structure that contains the fields 'mu' and 'Sigma', which
+contain the mean and (co)variance of the marginal on E.
+In this case, they are both scalars.
+Let us check they match the published figures (to 2 decimal places).
+
+
+It is easy to visualize this posterior using standard Matlab plotting
+functions, e.g.,
+
+gaussplot2d(marg.mu, marg.Sigma);
+
+produces the following picture.
+
+
+
+
+
+
+
+
+The T field indicates that the mixing weight of this Gaussian
+component is 1.0.
+If the joint contains discrete and continuous variables, the result
+will be a mixture of Gaussians, e.g.,
+
+The interpretation is
+Sigma(i,j,k) = Cov[ E(i) E(j) | F=k ].
+In this case, E is a scalar, so i=j=1; k specifies the mixture component.
+
+We saw in the sprinkler network that BNT sets the effective size of
+observed discrete nodes to 1, since they only have one legal value.
+For continuous nodes, BNT sets their length to 0,
+since they have been reduced to a point.
+For example,
+
+It is simple to post-process the output of marginal_nodes.
+For example, the file BNT/examples/static/cg1 sets the mu term of
+observed nodes to their observed value, and the Sigma term to 0 (since
+observed nodes have no variance).
+
+
+Note that the implemented version of the junction tree is numerically
+unstable when using CG potentials
+(which is why, in the example above, we only required our answers to agree with
+the published ones to 2dp.)
+This is why you might want to use stab_cond_gauss_inf_engine,
+implemented by Shan Huang. This is described in
+
+
+
"Stable Local Computation with Conditional Gaussian Distributions",
+S. Lauritzen and F. Jensen, Tech Report R-99-2014,
+Dept. Math. Sciences, Aalborg Univ., 1999.
+
+
+However, even the numerically stable version
+can be computationally intractable if there are many hidden discrete
+nodes, because the number of mixture components grows exponentially e.g., in a
+switching linear dynamical system.
+In general, one must resort to approximate inference techniques: see
+the discussion on inference engines below.
+
+
+
+
+The parameter estimation routines in BNT can be classified into 4
+types, depending on whether the goal is to compute
+a full (Bayesian) posterior over the parameters or just a point
+estimate (e.g., Maximum Likelihood or Maximum A Posteriori),
+and whether all the variables are fully observed or there is missing
+data/ hidden variables (partial observability).
+
+
+To load numeric data from an ASCII text file called 'dat.txt', where each row is a
+case and columns are separated by white-space, such as
+
+011979 1626.5 0.0
+021979 1367.0 0.0
+...
+
+you can use
+
+data = load('dat.txt');
+
+or
+
+load dat.txt -ascii
+
+In the latter case, the data is stored in a variable called 'dat' (the
+filename minus the extension).
+Alternatively, suppose the data is stored in a .csv file (has commas
+separating the columns, and contains a header line), such as
+
+header info goes here
+ORD,011979,1626.5,0.0
+DSM,021979,1367.0,0.0
+...
+
+If your file is not in either of these formats, you can either use Perl to convert
+it to this format, or use the Matlab scanf command.
+Type
+
+help iofun
+
+for more information on Matlab's file functions.
+
+
+BNT learning routines require data to be stored in a cell array.
+data{i,m} is the value of node i in case (example) m, i.e., each
+column is a case.
+If node i is not observed in case m (missing value), set
+data{i,m} = [].
+(Not all the learning routines can cope with such missing values, however.)
+In the special case that all the nodes are observed and are
+scalar-valued (as opposed to vector-valued), the data can be
+stored in a matrix (as opposed to a cell-array).
+
+Suppose, as in the mixture of experts example,
+that we have 3 nodes in the graph: X(1) is the observed input, X(3) is
+the observed output, and X(2) is a hidden (gating) node. We can
+create the dataset as follows.
+
+
+As an example, let's generate some data from the sprinkler network, randomize the parameters,
+and then try to recover the original model.
+First we create some training data using forwards sampling.
+
+samples{j,i} contains the value of the j'th node in case i.
+sample_bnet returns a cell array because, in general, each node might
+be a vector of different length.
+In this case, all nodes are discrete (and hence scalars), so we
+could have used a regular array instead (which can be quicker):
+
+data = cell2num(samples);
+
+Now we create a network with random parameters.
+(The initial values of bnet2 don't matter in this case, since we can find the
+globally optimal MLE independent of where we start.)
+
+% Make a tabula rasa
+bnet2 = mk_bnet(dag, node_sizes);
+seed = 0;
+rand('state', seed);
+bnet2.CPD{C} = tabular_CPD(bnet2, C);
+bnet2.CPD{R} = tabular_CPD(bnet2, R);
+bnet2.CPD{S} = tabular_CPD(bnet2, S);
+bnet2.CPD{W} = tabular_CPD(bnet2, W);
+
+Finally, we find the maximum likelihood estimates of the parameters.
+
+bnet3 = learn_params(bnet2, samples);
+
+To view the learned parameters, we use a little Matlab hackery.
+
+
+Currently, only tabular CPDs can have priors on their parameters.
+The conjugate prior for a multinomial is the Dirichlet.
+(For binary random variables, the multinomial is the same as the
+Bernoulli, and the Dirichlet is the same as the Beta.)
+
+The Dirichlet has a simple interpretation in terms of pseudo counts.
+If we let N_ijk = the num. times X_i=k and Pa_i=j occurs in the
+training set, where Pa_i are the parents of X_i,
+then the maximum likelihood (ML) estimate is
+T_ijk = N_ijk / N_ij (where N_ij = sum_k' N_ijk'), which will be 0 if N_ijk=0.
+To prevent us from declaring that (X_i=k, Pa_i=j) is impossible just because this
+event was not seen in the training set,
+we can pretend we saw value k of X_i, for each value j of Pa_i some number (alpha_ijk)
+of times in the past.
+The MAP (maximum a posteriori) estimate is then
+
+and is never 0 if all alpha_ijk > 0.
+For example, consider the network A->B, where A is binary and B has 3
+values.
+A uniform prior for B has the form
+
+ B=1 B=2 B=3
+A=1 1 1 1
+A=2 1 1 1
+
+which can be created using
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'unif');
+
+This prior does not satisfy the likelihood equivalence principle,
+which says that Markov equivalent models
+should have the same marginal likelihood.
+A prior that does satisfy this principle is shown below.
+Heckerman (1995) calls this the
+BDeu prior (likelihood equivalent uniform Bayesian Dirichlet).
+
+ B=1 B=2 B=3
+A=1 1/6 1/6 1/6
+A=2 1/6 1/6 1/6
+
+where we put N/(q*r) in each bin; N is the equivalent sample size,
+r = |B| is the number of states of the node, and q = |A| is the number of parent configurations.
+This can be created as follows
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'BDeu');
+
+Here, 1 is the equivalent sample size, and is the strength of the
+prior.
+You can change this using
+
+bnet.CPD{i}.prior contains the new Dirichlet pseudocounts,
+and bnet.CPD{i}.CPT is set to the mean of the posterior (the
+normalized counts).
+(Hence if the initial pseudo counts are 0,
+bayes_update_params and learn_params will give the
+same result.)
+
+
+
+
+
+We can compute the same result sequentially (on-line) as follows.
+
+
+The file BNT/examples/static/StructLearn/model_select1 has an example of
+sequential model selection which uses the same idea.
+We generate data from the model A->B
+and compute the posterior prob of all 3 dags on 2 nodes:
+ (1) A B, (2) A <- B , (3) A -> B
+Models 2 and 3 are Markov equivalent, and therefore indistinguishable from
+observational data alone, so we expect their posteriors to be the same
+(assuming a prior which satisfies likelihood equivalence).
+If we use random parameters, the "true" model only gets a higher posterior after 2000 trials!
+However, if we make B a noisy NOT gate, the true model "wins" after 12
+trials, as shown below (red = model 1, blue/green (superimposed)
+represents models 2/3).
+
+
+
+The use of marginal likelihood for model selection is discussed in
+greater detail in the
+section on structure learning.
+
+
+
+
+
+samples2{i,l} is the value of node i in training case l, or [] if unobserved.
+
+Now we will compute the MLEs using the EM algorithm.
+We need to use an inference algorithm to compute the expected
+sufficient statistics in the E step; the M (maximization) step is as
+above.
+
+
+In networks with repeated structure (e.g., chains and grids), it is
+common to assume that the parameters are the same at every node. This
+is called parameter tying, and reduces the amount of data needed for
+learning.
+
+When we have tied parameters, there is no longer a one-to-one
+correspondence between nodes and CPDs.
+Rather, each CPD specifies the parameters for a whole equivalence class
+of nodes.
+It is easiest to see this by example.
+Consider the following hidden Markov
+model (HMM)
+
+
+
+
+When HMMs are used for semi-infinite processes like speech recognition,
+we assume the transition matrix
+P(H(t+1)|H(t)) is the same for all t; this is called a time-invariant
+or homogeneous Markov chain.
+Hence hidden nodes 2, 3, ..., T
+are all in the same equivalence class, say class Hclass.
+Similarly, the observation matrix P(O(t)|H(t)) is assumed to be the
+same for all t, so the observed nodes are all in the same equivalence
+class, say class Oclass.
+Finally, the prior term P(H(1)) is in a class all by itself, say class
+H1class.
+This is illustrated below, where we explicitly represent the
+parameters as random variables (dotted nodes).
+
+
+
+In BNT, we cannot represent parameters as random variables (nodes).
+Instead, we "hide" the
+parameters inside one CPD for each equivalence class,
+and then specify that the other CPDs should share these parameters, as
+follows.
+
+hnodes = 1:2:2*T;
+onodes = 2:2:2*T;
+H1class = 1; Hclass = 2; Oclass = 3;
+eclass = ones(1,N);
+eclass(hnodes(2:end)) = Hclass;
+eclass(hnodes(1)) = H1class;
+eclass(onodes) = Oclass;
+% create dag and ns in the usual way
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'equiv_class', eclass);
+
+Finally, we define the parameters for each equivalence class:
+
+In general, if bnet.CPD{e} = xxx_CPD(bnet, j), then j should be a
+member of e's equivalence class; that is, it is not always the case
+that e == j. You can use bnet.rep_of_eclass(e) to return the
+representative of equivalence class e.
+BNT will look up the parents of j to determine the size
+of the CPT to use. It assumes that this is the same for all members of
+the equivalence class.
+Click here for
+a more complex example of parameter tying.
+
+Note:
+Normally one would define an HMM as a
+Dynamic Bayes Net
+(see the function BNT/examples/dynamic/mk_chmm.m).
+However, one can define an HMM as a static BN using the function
+BNT/examples/static/Models/mk_hmm_bnet.m.
+
+
+
+
+
+Update (9/29/03):
+Philippe Leray is developing some additional structure learning code
+on top of BNT. Click
+
+here
+for details.
+
+
+
+There are two very different approaches to structure learning:
+constraint-based and search-and-score.
+In the constraint-based approach,
+we start with a fully connected graph, and remove edges if certain
+conditional independencies are measured in the data.
+This has the disadvantage that repeated independence tests lose
+statistical power.
+
+In the more popular search-and-score approach,
+we perform a search through the space of possible DAGs, and either
+return the best one found (a point estimate), or return a sample of the
+models found (an approximation to the Bayesian posterior).
+
+The number of DAGs as a function of the number of
+nodes, G(n), is super-exponential in n,
+and is given by the following recurrence
+
+
+
+
+
+
+The first few values
+are shown below.
+
+
+
n
G(n)
+
1
1
+
2
3
+
3
25
+
4
543
+
5
29,281
+
6
3,781,503
+
7
1.1 x 10^9
+
8
7.8 x 10^11
+
9
1.2 x 10^15
+
10
4.2 x 10^18
+
+
+Since the number of DAGs is super-exponential in the number of nodes,
+we cannot exhaustively search the space, so we either use a local
+search algorithm (e.g., greedy hill climbing, perhaps with multiple
+restarts) or a global search algorithm (e.g., Markov Chain Monte
+Carlo).
+
+If we know a total ordering on the nodes,
+finding the best structure amounts to picking the best set of parents
+for each node independently.
+This is what the K2 algorithm does.
+If the ordering is unknown, we can search over orderings,
+which is more efficient than searching over DAGs (Koller and Friedman, 2000).
+
+In addition to the search procedure, we must specify the scoring
+function. There are two popular choices. The Bayesian score integrates
+out the parameters, i.e., it is the marginal likelihood of the model.
+The BIC (Bayesian Information Criterion) is defined as
+log P(D|theta_hat) - 0.5*d*log(N), where D is the data, theta_hat is
+the ML estimate of the parameters, d is the number of parameters, and
+N is the number of data cases.
+The BIC method has the advantage of not requiring a prior.
+
+BIC can be derived as a large sample
+approximation to the marginal likelihood.
+(It is also equal to the Minimum Description Length of a model.)
+However, in practice, the sample size does not need to be very large
+for the approximation to be good.
+For example, in the figure below, we plot the ratio between the log marginal likelihood
+and the BIC score against data-set size; we see that the ratio rapidly
+approaches 1, especially for non-informative priors.
+(This plot was generated by the file BNT/examples/static/bic1.m. It
+uses the water sprinkler BN with BDeu Dirichlet priors with different
+equivalent sample sizes.)
+
+
+
+
+
+
+
+
+As with parameter learning, handling missing data/ hidden variables is
+much harder than the fully observed case.
+The structure learning routines in BNT can therefore be classified into 4
+types, analogously to the parameter learning case.
+
+
+If two DAGs encode the same conditional independencies, they are
+called Markov equivalent. The set of all DAGs can be partitioned into
+Markov equivalence classes. Graphs within the same class can
+have
+the direction of some of their arcs reversed without changing any of
+the CI relationships.
+Each class can be represented by a PDAG
+(partially directed acyclic graph) called an essential graph or
+pattern. This specifies which edges must be oriented in a certain
+direction, and which may be reversed.
+
+
+When learning graph structure from observational data,
+the best one can hope to do is to identify the model up to Markov
+equivalence. To distinguish amongst graphs within the same equivalence
+class, one needs interventional data: see the discussion on active learning below.
+
+
+
+
+
+The brute-force approach to structure learning is to enumerate all
+possible DAGs, and score each one. This provides a "gold standard"
+with which to compare other algorithms. We can do this as follows.
+
+where data(i,m) is the value of node i in case m,
+and ns(i) is the size of node i.
+If the DAGs have a lot of families in common, we can cache the sufficient statistics,
+making this potentially more efficient than scoring the DAGs one at a time.
+(Caching is not currently implemented, however.)
+
+By default, we use the Bayesian scoring metric, and assume CPDs are
+represented by tables with BDeu(1) priors.
+We can override these defaults as follows.
+If we want to use uniform priors, we can say
+
+params{i} is a cell-array, containing optional arguments that are
+passed to the constructor for CPD i.
+
+Now suppose we want to use different node types, e.g.,
+Suppose nodes 1 and 2 are Gaussian, and nodes 3 and 4 softmax (both
+these CPDs can support discrete and continuous parents, which is
+necessary since all other nodes will be considered as parents).
+The Bayesian scoring metric currently only works for tabular CPDs, so
+we will use BIC:
+
+In practice, one can't enumerate all possible DAGs for N > 5,
+but one can evaluate any reasonably-sized set of hypotheses in this
+way (e.g., nearest neighbors of your current best guess).
+Think of this as "computer assisted model refinement" as opposed to de
+novo learning.
+
+
+
+
+The K2 algorithm (Cooper and Herskovits, 1992) is a greedy search algorithm that works as follows.
+Initially each node has no parents. It then adds incrementally that parent whose addition most
+increases the score of the resulting structure. When the addition of no single
+parent can increase the score, it stops adding parents to the node.
+Since we are using a fixed ordering, we do not need to check for
+cycles, and can choose the parents for each node independently.
+
+The original paper used the Bayesian scoring
+metric with tabular CPDs and Dirichlet priors.
+BNT generalizes this to allow any kind of CPD, and either the Bayesian
+scoring metric or BIC, as in the example above.
+In addition, you can specify
+an optional upper bound on the number of parents for each node.
+The file BNT/examples/static/k2demo1.m gives an example of how to use K2.
+We use the water sprinkler network and sample 100 cases from it as before.
+Then we see how much data it takes to recover the generating structure:
+
+So we see it takes about sz(10)=50 cases. (BIC behaves similarly,
+showing that the prior doesn't matter too much.)
+In general, we cannot hope to recover the "true" generating structure,
+only one that is in its Markov equivalence
+class.
+
+
+
+
+Hill-climbing starts at a specific point in space,
+considers all nearest neighbors, and moves to the neighbor
+that has the highest score; if no neighbors have higher
+score than the current point (i.e., we have reached a local maximum),
+the algorithm stops. One can then restart in another part of the space.
+
+A common definition of "neighbor" is all graphs that can be
+generated from the current graph by adding, deleting or reversing a
+single arc, subject to the acyclicity constraint.
+Other neighborhoods are possible: see
+
+Optimal Structure Identification with Greedy Search, Max
+Chickering, JMLR 2002.
+
+
+
+
+
+
+We can use a Markov Chain Monte Carlo (MCMC) algorithm called
+Metropolis-Hastings (MH) to search the space of all
+DAGs.
+The standard proposal distribution is to consider moving to all
+nearest neighbors in the sense defined above.
+
+The function can be called
+as in the following example.
+
+We can also plot the acceptance ratio versus number of MCMC steps,
+as a crude convergence diagnostic.
+
+clf
+plot(accept_ratio)
+
+
+
+Even though the number of samples needed by MCMC is theoretically
+polynomial (not exponential) in the dimensionality of the search space, in practice it has been
+found that MCMC does not converge in reasonable time for graphs with
+more than about 10 nodes.
+
+
+
+
+
+
+As was mentioned above,
+one can only learn a DAG up to Markov equivalence, even given infinite data.
+If one is interested in learning the structure of a causal network,
+one needs interventional data.
+(By "intervention" we mean forcing a node to take on a specific value,
+thereby effectively severing its incoming arcs.)
+
+Most of the scoring functions accept an optional argument
+that specifies whether a node was observed to have a certain value, or
+was forced to have that value: we set clamped(i,m)=1 if node i was
+forced in training case m. e.g., see the file
+BNT/examples/static/cooper_yoo.
+
+An interesting question is to decide which interventions to perform
+(c.f., design of experiments). For details, see the following tech
+report
+
+
+Computing the Bayesian score when there is partial observability is
+computationally challenging, because the parameter posterior becomes
+multimodal (the hidden nodes induce a mixture distribution).
+One therefore needs to use approximations such as BIC.
+Unfortunately, search algorithms are still expensive, because we need
+to run EM at each step to compute the MLE, which is needed to compute
+the score of each model. An alternative approach is
+to do the local search steps inside of the M step of EM, which is more
+efficient since the data has been "filled in" - this is
+called the structural EM algorithm (Friedman 1997), and provably
+converges to a local maximum of the BIC score.
+
+Wei Hu has implemented SEM for discrete nodes.
+You can download his package from
+here.
+Please address all questions about this code to
+wei.hu@intel.com.
+See also Philippe Leray's implementation of SEM.
+
+
+
+
+
+
+The IC algorithm (Pearl and Verma, 1991),
+and the faster, but otherwise equivalent, PC algorithm (Spirtes, Glymour, and Scheines 1993),
+computes many conditional independence tests,
+and combines these constraints into a
+PDAG to represent the whole
+Markov equivalence class.
+
+IC*/FCI extend IC/PC to handle latent variables: see below.
+(IC stands for inductive causation; PC stands for Peter and Clark,
+the first names of Spirtes and Glymour; FCI stands for fast causal
+inference.
+What we, following Pearl (2000), call IC* was called
+IC in the original Pearl and Verma paper.)
+For details, see
+
+
+The PC algorithm takes as arguments a function f, the number of nodes N,
+the maximum fan in K, and additional arguments A which are passed to f.
+The function f(X,Y,S,A) returns 1 if X is conditionally independent of Y given S, and 0
+otherwise.
+For example, suppose we cheat by
+passing in a CI "oracle" which has access to the true DAG; the oracle
+tests for d-separation in this DAG, i.e.,
+f(X,Y,S) calls dsep(X,Y,S,dag). We can do this as follows.
+
+pdag = learn_struct_pdag_pc('dsep', N, max_fan_in, dag);
+
+pdag(i,j) = -1 if there is definitely an i->j arc,
+and pdag(i,j) = 1 if there is either an i->j or an i<-j arc.
+
+So as expected, we see that the V-structure at the W node is uniquely identified,
+but the other arcs have ambiguous orientation.
+
+We now give an example from p141 (1st edn) / p103 (2nd edn) of the SGS
+book.
+This example concerns the female orgasm.
+We are given a correlation matrix C between 7 measured factors (such
+as subjective experiences of coital and masturbatory experiences),
+derived from 281 samples, and want to learn a causal model of the
+data. We will not discuss the merits of this type of work here, but
+merely show how to reproduce the results in the SGS book.
+Their program,
+Tetrad,
+makes use of the Fisher Z-test for conditional
+independence, so we do the same:
+
+The results match those of Fig 12a of SGS apart from two edge
+differences; presumably this is due to rounding error (although it
+could be a bug, either in BNT or in Tetrad).
+This example can be found in the file BNT/examples/static/pc2.m.
+
+
+
+The IC* algorithm (Pearl and Verma, 1991),
+and the faster FCI algorithm (Spirtes, Glymour, and Scheines 1993),
+are like the IC/PC algorithm, except that they can detect the presence
+of latent variables.
+See the file learn_struct_pdag_ic_star written by Tamar
+Kushnir. The output is a matrix P, defined as follows
+(see Pearl (2000), p52 for details):
+
+% P(i,j) = -1 if there is either a latent variable L such that i <-L->j OR there is a directed edge from i->j.
+% P(i,j) = -2 if there is a marked directed i-*>j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i--j
+% P(i,j) = P(j,i) = 2 if there is a latent variable L such that i<-L->j.
+
+
+Up until now, we have used the junction tree algorithm for inference.
+However, sometimes this is too slow, or not even applicable.
+In general, there are many inference algorithms each of which make
+different tradeoffs between speed, accuracy, complexity and
+generality. Furthermore, there might be many implementations of the
+same algorithm; for instance, a general purpose, readable version,
+and a highly-optimized, specialized one.
+To cope with this variety, we treat each inference algorithm as an
+object, which we call an inference engine.
+
+
+An inference engine is an object that contains a bnet and supports the
+'enter_evidence' and 'marginal_nodes' methods. The engine constructor
+takes the bnet as argument and may do some model-specific processing.
+When 'enter_evidence' is called, the engine may do some
+evidence-specific processing. Finally, when 'marginal_nodes' is
+called, the engine may do some query-specific processing.
+
+
+The amount of work done when each stage is specified -- structure,
+parameters, evidence, and query -- depends on the engine. The cost of
+work done early in this sequence can be amortized. On the other hand,
+one can make better optimizations if one waits until later in the
+sequence.
+For example, the parameters might imply
+conditional independencies that are not evident in the graph structure,
+but can nevertheless be exploited; the evidence indicates which nodes
+are observed and hence can effectively be disconnected from the
+graph; and the query might indicate that large parts of the network
+are d-separated from the query nodes. (Since it is not the actual
+values of the evidence that matters, just which nodes are observed,
+many engines allow you to specify which nodes will be observed when they are constructed,
+i.e., before calling 'enter_evidence'. Some engines can still cope if
+the actual pattern of evidence is different, e.g., if there is missing
+data.)
+
+
+Although being maximally lazy (i.e., only doing work when a query is
+issued) may seem desirable,
+this is not always the most efficient.
+For example,
+when learning using EM, we need to call marginal_nodes N times, where N is the
+number of nodes. Variable elimination would end
+up repeating a lot of work
+each time marginal_nodes is called, making it inefficient for
+learning. The junction tree algorithm, by contrast, uses dynamic
+programming to avoid this redundant computation --- it calculates all
+marginals in two passes during 'enter_evidence', so calling
+'marginal_nodes' takes constant time.
+
+We will discuss some of the inference algorithms implemented in BNT
+below, and finish with a summary of all
+of them.
+
+
+
+
+
+
+
+
+
+The variable elimination algorithm, also known as bucket elimination
+or peeling, is one of the simplest inference algorithms.
+The basic idea is to "push sums inside of products"; this is explained
+in more detail
+here.
+
+The principle of distributing sums over products can be generalized
+greatly to apply to any commutative semiring.
+This forms the basis of many common algorithms, such as Viterbi
+decoding and the Fast Fourier Transform. For details, see
+
+
+
R. McEliece and S. M. Aji, 2000.
+
+
+The Generalized Distributive Law,
+IEEE Trans. Inform. Theory, vol. 46, no. 2 (March 2000),
+pp. 325--343.
+
+
+
+Choosing an order in which to sum out the variables so as to minimize
+computational cost is known to be NP-hard.
+The implementation of this algorithm in
+var_elim_inf_engine makes no attempt to optimize this
+ordering (in contrast, say, to jtree_inf_engine, which uses a
+greedy search procedure to find a good ordering).
+
+Note: unlike most algorithms, var_elim does all its computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+
+
+The simplest inference algorithm of all is to explicitly construct
+the joint distribution over all the nodes, and then to marginalize it.
+This is implemented in global_joint_inf_engine.
+Since the size of the joint is exponential in the
+number of discrete (hidden) nodes, this is not a very practical algorithm.
+It is included merely for pedagogical and debugging purposes.
+
+Three specialized versions of this algorithm have also been implemented,
+corresponding to the cases where all the nodes are discrete (D), all
+are Gaussian (G), and some are discrete and some Gaussian (CG).
+They are called enumerative_inf_engine,
+gaussian_inf_engine,
+and cond_gauss_inf_engine respectively.
+
+Note: unlike most algorithms, these global inference algorithms do all their computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+The junction tree algorithm is quite slow on the QMR network,
+since the cliques are so big.
+One simple trick we can use is to notice that hidden leaves do not
+affect the posteriors on the roots, and hence do not need to be
+included in the network.
+A second trick is to notice that the negative findings can be
+"absorbed" into the prior:
+see the file
+BNT/examples/static/mk_minimal_qmr_bnet for details.
+
+
+A much more significant speedup is obtained by exploiting special
+properties of the noisy-or node, as done by the quickscore
+algorithm. For details, see
+
+
Heckerman, "A tractable inference algorithm for diagnosing multiple diseases", UAI 89.
+
Rish and Dechter, "On the impact of causal independence", UCI
+tech report, 1998.
+
+
+This has been implemented in BNT as a special-purpose inference
+engine, which can be created and used as follows:
+
+
+Even using quickscore, exact inference takes time that is exponential
+in the number of positive findings.
+Hence for large networks we need to resort to approximate inference techniques.
+See for example
+
+
T. Jaakkola and M. Jordan, "Variational probabilistic inference and the
+QMR-DT network", JAIR 10, 1999.
+
+
K. Murphy, Y. Weiss and M. Jordan, "Loopy belief propagation for approximate inference: an empirical study",
+ UAI 99.
+
+The latter approximation
+entails applying Pearl's belief propagation algorithm to a model even
+if it has loops (hence the name loopy belief propagation).
+Pearl's algorithm, implemented as pearl_inf_engine, gives
+exact results when applied to singly-connected graphs
+(a.k.a. polytrees, since
+the underlying undirected topology is a tree, but a node may have
+multiple parents).
+To apply this algorithm to a graph with loops,
+use pearl_inf_engine.
+This can use a centralized or distributed message passing protocol.
+You can use it as in the following example.
+
+We found that this algorithm often converges, and when it does, often
+is very accurate, but it depends on the precise setting of the
+parameter values of the network.
+(See the file BNT/examples/static/qmr1 to repeat the experiment for yourself.)
+Understanding when and why belief propagation converges/ works
+is a topic of ongoing research.
+
+pearl_inf_engine can exploit special structure in noisy-or
+and gmux nodes to compute messages efficiently.
+
+belprop_inf_engine is like pearl, but uses potentials to
+represent messages. Hence this is slower.
+
+belprop_fg_inf_engine is like belprop,
+but is designed for factor graphs.
+
+
+
+
+
+BNT now (Mar '02) has two sampling (Monte Carlo) inference algorithms:
+
+
likelihood_weighting_inf_engine which does importance
+sampling and can handle any node type.
+
gibbs_sampling_inf_engine, written by Bhaskara Marthi.
+Currently this can only handle tabular CPDs.
+For a much faster and more powerful Gibbs sampling program, see
+BUGS.
+
+Note: To generate samples from a network (which is not the same as inference!),
+use sample_bnet.
+
+
+
+
+
+
+The inference engines differ in many ways. Here are
+some of the major "axes":
+
+
Works for all topologies or makes restrictions?
+
Works for all node types or makes restrictions?
+
Exact or approximate inference?
+
+
+
+In terms of topology, most engines handle any kind of DAG.
+belprop_fg does approximate inference on factor graphs (FG), which
+can be used to represent directed, undirected, and mixed (chain)
+graphs.
+(In the future, we plan to support exact inference on chain graphs.)
+quickscore only works on QMR-like models.
+
+In terms of node types: algorithms that use potentials can handle
+discrete (D), Gaussian (G) or conditional Gaussian (CG) models.
+Sampling algorithms can essentially handle any kind of node (distribution).
+Other algorithms make more restrictive assumptions in exchange for
+speed.
+
+Finally, most algorithms are designed to give the exact answer.
+The belief propagation algorithms are exact if applied to trees, and
+in some other cases.
+Sampling is considered approximate, even though, in the limit of an
+infinite number of samples, it gives the exact answer.
+
+
+
+Here is a summary of the properties
+of all the engines in BNT which work on static networks.
+
+LIMIDs explicitly show all information arcs, rather than implicitly
+assuming no forgetting. This allows them to model forgetful
+controllers.
+
+See the examples in BNT/examples/limids for details.
+
+
+
+
+
DBNs, HMMs, Kalman filters and all that
+
+Click here for documentation about how to
+use BNT for dynamical systems and sequence data.
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/usage_02nov13.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/usage_02nov13.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3215 @@
+
+How to use the Bayes Net Toolbox
+
+
+
+
+
+
How to use the Bayes Net Toolbox
+
+This documentation was last updated on 13 November 2002.
+
+Click here for a list of changes made to
+BNT.
+
+Click
+here
+for a French version of this documentation (which might not
+be up-to-date).
+
+
+
Unpack the file. In Unix, type
+
+"unzip FullBNT.zip".
+In Windows, use
+a program like Winzip. This will
+create a directory called FullBNT, which contains BNT and other libraries.
+
+
+
Read the file BNT/README to make sure the date
+matches the one on the top of the BNT home page.
+If not, you may need to press 'refresh' on your browser, and download
+again, to get the most recent version.
+
+
+
Edit the file "BNT/add_BNT_to_path.m" so it contains the correct
+pathname.
+For example, in Windows,
+I download FullBNT.zip into C:\kpmurphy\matlab, and
+then comment out the second line (with the % character), and uncomment
+the third line, which reads
+
+BNT_HOME = 'C:\kpmurphy\matlab\FullBNT';
+
+
+
+
Start up Matlab.
+
+
+
Type "ver" at the Matlab prompt (">>").
+You need Matlab version 5.2 or newer to run BNT.
+(Versions 5.0 and 5.1 have a memory leak which seems to sometimes
+crash BNT.)
+
+
+
Move to the BNT directory.
+For example, in Windows, I type
+
+>> cd C:\kpmurphy\matlab\FullBNT\BNT
+
+
+
+
Type "add_BNT_to_path".
+This executes the command
+addpath(genpath(BNT_HOME)),
+which adds all directories below FullBNT to the matlab path.
+
+
+
Type "test_BNT".
+
+If all goes well, this will produce a bunch of numbers and maybe some
+warning messages (which you can ignore), but no error messages.
+(The warnings should only be of the form
+"Warning: Maximum number of iterations has been exceeded", and are
+produced by Netlab.)
+
+
+
+
+If you are new to Matlab, you might like to check out
+some useful Matlab tips.
+For instance, this explains how to create a startup file, which can be
+used to set your path variable automatically, so you can avoid having
+to type the above commands every time.
+
+
+
+
+
+
+Some BNT functions also have C implementations.
+It is not necessary to install the C code, but it can result in a speedup
+of a factor of 5-10.
+To install all the C code,
+edit installC_BNT.m so it contains the right path,
+then type installC_BNT.
+To uninstall all the C code,
+edit uninstallC_BNT.m so it contains the right path,
+then type uninstallC_BNT.
+For an up-to-date list of the files which have C implementations, see
+BNT/installC_BNT.m.
+
+
+mex is a script that lets you call C code from Matlab - it does not compile matlab to
+C (see mcc below).
+If your C/C++ compiler is set up correctly, mex should work out of
+the box.
+If not, you might need to type
+
+In general, typing
+'mex foo.c' from inside Matlab creates a file called
+'foo.mexglx' or 'foo.dll' (the exact file
+extension is system dependent - on Linux it is 'mexglx', on Windows it is '.dll').
+The resulting file will hide the original 'foo.m' (if it existed), i.e.,
+typing 'foo' at the prompt will call the compiled C version.
+To reveal the original matlab version, just delete foo.mexglx (this is
+what uninstallC does).
+
+Sometimes it takes time for Matlab to realize that the file has
+changed from matlab to C or vice versa; try typing 'clear all' or
+restarting Matlab to refresh it.
+To find out which version of a file you are running, type
+'which foo'.
+
+mcc, the
+Matlab to C compiler, is a separate product,
+and is quite different from mex. It does not yet support
+objects/classes, which is why we can't compile all of BNT to C automatically.
+Also, hand-written C code is usually much
+better than the C code generated by mcc.
+
+
+
+Acknowledgements:
+Although I wrote some of the C code, most of
+the C code (e.g., for jtree and dpot) was written by Wei Hu;
+the triangulation C code was written by Ilya Shpitser.
+
+
+
+
+To define a Bayes net, you must specify the graph structure and then
+the parameters. We look at each in turn, using a simple example
+(adapted from Russell and
+Norvig, "Artificial Intelligence: a Modern Approach", Prentice Hall,
+1995, p454).
+
+
+
Graph structure
+
+
+Consider the following network.
+
+
+
+
+
+
+
+
+To specify this directed acyclic graph (dag), we create an adjacency matrix:
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+
+We have numbered the nodes as follows:
+Cloudy = 1, Sprinkler = 2, Rain = 3, WetGrass = 4.
+The nodes must always be numbered in topological order, i.e.,
+ancestors before descendants.
+For a more complicated graph, this is a little inconvenient: we will
+see how to get around this below.
+
+In Matlab 6, you can use logical arrays instead of double arrays,
+which are 4 times smaller:
+
+dag = false(N,N);
+dag(C,[R S]) = true;
+...
+
+
+A preliminary attempt to make a GUI
+has been written by Philippe LeRay and can be downloaded
+from here.
+
+You can visualize the resulting graph structure using
+the methods discussed below.
+
+
Creating the Bayes net shell
+
+In addition to specifying the graph structure,
+we must specify the size and type of each node.
+If a node is discrete, its size is the
+number of possible values
+each node can take on; if a node is continuous,
+it can be a vector, and its size is the length of this vector.
+In this case, we will assume all nodes are discrete and binary.
+
+If the nodes were not binary, you could type e.g.,
+
+node_sizes = [4 2 3 5];
+
+meaning that Cloudy has 4 possible values,
+Sprinkler has 2 possible values, etc.
+Note that these are cardinal values, not ordinal, i.e.,
+they are not ordered in any way, like 'low', 'medium', 'high'.
+
+Note that optional arguments are specified using a name/value syntax.
+This is common for many BNT functions.
+In general, to find out more about a function (e.g., which optional
+arguments it takes), please see its
+documentation string by typing
+
+
+A model consists of the graph structure and the parameters.
+The parameters are represented by CPD objects (CPD = Conditional
+Probability Distribution), which define the probability distribution
+of a node given its parents.
+(We will use the terms "node" and "random variable" interchangeably.)
+The simplest kind of CPD is a table (multi-dimensional array), which
+is suitable when all the nodes are discrete-valued. Note that the discrete
+values are not assumed to be ordered in any way; that is, they
+represent categorical quantities, like male and female, rather than
+ordinal quantities, like low, medium and high.
+(We will discuss CPDs in more detail below.)
+
+Tabular CPDs, also called CPTs (conditional probability tables),
+are stored as multidimensional arrays, where the dimensions
+are arranged in the same order as the nodes, e.g., the CPT for node 4
+(WetGrass) is indexed by Sprinkler (2), Rain (3) and then WetGrass (4) itself.
+Hence the child is always the last dimension.
+If a node has no parents, its CPT is a column vector representing its
+prior.
+Note that in Matlab (unlike C), arrays are indexed
+from 1, and are laid out in memory such that the first index toggles
+fastest, e.g., the CPT for node 4 (WetGrass) is as follows
+
+
+
+where we have used the convention that false==1, true==2.
+We can create this CPT in Matlab as follows
+
+
+If we do not specify the CPT, random parameters will be
+created, i.e., each "row" of the CPT will be drawn from the uniform distribution.
+To ensure repeatable results, use
+
+rand('state', seed);
+randn('state', seed);
+
+To control the degree of randomness (entropy),
+you can sample each row of the CPT from a Dirichlet(p,p,...) distribution.
+If p << 1, this encourages "deterministic" CPTs (one entry near 1, the rest near 0).
+If p = 1, each entry is drawn from U[0,1].
+If p >> 1, the entries will all be near 1/k, where k is the arity of
+this node, i.e., each row will be nearly uniform.
+You can do this as follows, assuming this node
+is number i, and ns is the node_sizes.
+
+
+Having created the BN, we can now use it for inference.
+There are many different algorithms for doing inference in Bayes nets,
+that make different tradeoffs between speed,
+complexity, generality, and accuracy.
+BNT therefore offers a variety of different inference
+"engines". We will discuss these
+in more detail below.
+For now, we will use the junction tree
+engine, which is the mother of all exact inference algorithms.
+This can be created as follows.
+
+engine = jtree_inf_engine(bnet);
+
+The other engines have similar constructors, but might take
+additional, algorithm-specific parameters.
+All engines are used in the same way, once they have been created.
+We illustrate this in the following sections.
+
+
+
+
+Suppose we want to compute the probability that the sprinkler was on
+given that the grass is wet.
+The evidence consists of the fact that W=2. All the other nodes
+are hidden (unobserved). We can specify this as follows.
+
+evidence = cell(1,N);
+evidence{W} = 2;
+
+We use a 1D cell array instead of a vector to
+cope with the fact that nodes can be vectors of different lengths.
+In addition, the value [] can be used
+to denote 'no evidence', instead of having to specify the observation
+pattern as a separate argument.
+(Click here for a quick tutorial on cell
+arrays in matlab.)
+
+We are now ready to add the evidence to the engine.
+
+The behavior of this function is algorithm-specific, and is discussed
+in more detail below.
+In the case of the jtree engine,
+enter_evidence implements a two-pass message-passing scheme.
+The first return argument contains the modified engine, which
+incorporates the evidence. The second return argument contains the
+log-likelihood of the evidence. (Not all engines are capable of
+computing the log-likelihood.)
+
+Finally, we can compute p=P(S=2|W=2) as follows.
+
+
+What happens if we ask for the marginal on an observed node, e.g. P(W|W=2)?
+An observed discrete node effectively only has 1 value (the observed
+ one) --- all other values would result in 0 probability.
+For efficiency, BNT treats observed (discrete) nodes as if they were
+ set to 1, as we see below:
+
+m is a structure. The 'T' field is a multi-dimensional array (in
+this case, 3-dimensional) that contains the joint probability
+distribution on the specified nodes.
+
+The joint T(i,j,k) = P(S=i,R=j,W=k|evidence)
+should have T(i,1,k) = 0 for all i,k, since R=1 is incompatible
+with the evidence that R=2.
+Instead of creating large tables with many 0s, BNT sets the effective
+size of observed (discrete) nodes to 1, as explained above.
+This is why m.T has size 2x1x2.
+To get a 2x2x2 table, type
+
+Note: It is not always possible to compute the joint on arbitrary
+sets of nodes: it depends on which inference engine you use, as discussed
+in more detail below.
+
+
+
+
+Sometimes a node is not observed, but we have some distribution over
+its possible values; this is often called "soft" or "virtual"
+evidence.
+One can use this as follows
+
+where soft_evidence{i} is either [] (if node i has no soft evidence)
+or is a vector representing the probability distribution over i's
+possible values.
+For example, if we don't know i's exact value, but we know its
+likelihood ratio is 60/40, we can write evidence{i} = [] and
+soft_evidence{i} = [0.6 0.4].
+
+Currently only jtree_inf_engine supports this option.
+It assumes that all hidden nodes, and all nodes for
+which we have soft evidence, are discrete.
+For a longer example, see BNT/examples/static/softev1.m.
+
+
+
+
+To compute the most probable explanation (MPE) of the evidence (i.e.,
+the most probable assignment, or a mode of the joint), use
+
+[mpe, ll] = calc_mpe(engine, evidence);
+
+mpe{i} is the most likely value of node i.
+This calls enter_evidence with the 'maximize' flag set to 1, which
+causes the engine to do max-product instead of sum-product.
+The resulting max-marginals are then thresholded.
+If there is more than one maximum probability assignment, we must take
+ care to break ties in a consistent manner (thresholding the
+ max-marginals may give the wrong result). To force this behavior,
+ type
+
+[mpe, ll] = calc_mpe(engine, evidence, 1);
+
+Note that computing the MPE is sometimes called abductive reasoning.
+
+
+You can also use calc_mpe_bucket written by Ron Zohar,
+that does a forwards max-product pass, and then a backwards traceback
+pass, which is how Viterbi is traditionally implemented.
+
+
+
+
+
+A Conditional Probability Distributions (CPD)
+defines P(X(i) | X(Pa(i))), where X(i) is the i'th node, and X(Pa(i))
+are the parents of node i. There are many ways to represent this
+distribution, which depend in part on whether X(i) and X(Pa(i)) are
+discrete, continuous, or a combination.
+We will discuss various representations below.
+
+
+
+
+If the CPD is represented as a table (i.e., if it is a multinomial
+distribution), it has a number of parameters that is exponential in
+the number of parents. See the example above.
+
+
+
+
+A noisy-OR node is like a regular logical OR gate except that
+sometimes the effects of parents that are on get inhibited.
+Let the prob. that parent i gets inhibited be q(i).
+Then a node, C, with 2 parents, A and B, has the following CPD, where
+we use F and T to represent off and on (1 and 2 in BNT).
+
+A B P(C=off) P(C=on)
+---------------------------
+F F 1.0 0.0
+T F q(A) 1-q(A)
+F T q(B) 1-q(B)
+T T q(A)q(B) 1-q(A)q(B)
+
+Thus we see that the causes get inhibited independently.
+It is common to associate a "leak" node with a noisy-or CPD, which is
+like a parent that is always on. This can account for all other unmodelled
+causes which might turn the node on.
+
+The noisy-or distribution is similar to the logistic distribution.
+To see this, let the nodes, S(i), have values in {0,1}, and let q(i,j)
+be the prob. that j inhibits i. Then
+
+where sigma(x) = 1/(1+exp(-x)). Hence they differ in the choice of
+the activation function (although both are monotonically increasing).
+In addition, in the case of a noisy-or, the weights are constrained to be
+positive, since they derive from probabilities q(i,j).
+In both cases, the number of parameters is linear in the
+number of parents, unlike the case of a multinomial distribution,
+where the number of parameters is exponential in the number of parents.
+We will see an example of noisy-OR nodes below.
+
+
+
+
+Deterministic CPDs for discrete random variables can be created using
+the deterministic_CPD class. It is also possible to 'flip' the output
+of the function with some probability, to simulate noise.
+The boolean_CPD class is just a special case of a
+deterministic CPD, where the parents and child are all binary.
+
+Both of these classes are just "syntactic sugar" for the tabular_CPD
+class.
+
+
+
+
+
+If we have a discrete node with a continuous parent,
+we can define its CPD using a softmax function
+(also known as the multinomial logit function).
+This acts like a soft thresholding operator, and is defined as follows:
+
+The parameters of a softmax node, w(:,i) and b(i), i=1..|Q|, have the
+following interpretation: w(:,i)-w(:,j) is the normal vector to the
+decision boundary between classes i and j,
+and b(i)-b(j) is its offset (bias). For example, suppose
+X is a 2-vector, and Q is binary. Then
+
+w = [1 -1;
+ 0 0];
+
+b = [0 0];
+
+means class 1 are points in the 2D plane with positive x coordinate,
+and class 2 are points in the 2D plane with negative x coordinate.
+If w has large magnitude, the decision boundary is sharp, otherwise it
+is soft.
+In the special case that Q is binary (0/1), the softmax function reduces to the logistic
+(sigmoid) function.
+
+Fitting a softmax function can be done using the iteratively reweighted
+least squares (IRLS) algorithm.
+We use the implementation from
+Netlab.
+Note that since
+the softmax distribution is not in the exponential family, it does not
+have finite sufficient statistics, and hence we must store all the
+training data in uncompressed form.
+If this takes too much space, one should use online (stochastic) gradient
+descent (not implemented in BNT).
+
+If a softmax node also has discrete parents,
+we use a different set of w/b parameters for each combination of
+parent values, as in the conditional linear
+Gaussian CPD.
+This feature was implemented by Pierpaolo Brutti.
+He is currently extending it so that discrete parents can be treated
+as if they were continuous, by adding indicator variables to the X
+vector.
+
+We will see an example of softmax nodes below.
+
+
+
+
+Pierpaolo Brutti has implemented the mlp_CPD class, which uses a multi layer perceptron
+to implement a mapping from continuous parents to discrete children,
+similar to the softmax function.
+(If there are also discrete parents, it creates a mixture of MLPs.)
+It uses code from Netlab.
+This is work in progress.
+
+
+
+A root node has no parents and no parameters; it can be used to model
+an observed, exogenous input variable, i.e., one which is "outside"
+the model.
+This is useful for conditional density models.
+We will see an example of root nodes below.
+
+
+
+
+We now consider a distribution suitable for the continuous-valued nodes.
+Suppose the node is called Y, its continuous parents (if any) are
+called X, and its discrete parents (if any) are called Q.
+The distribution on Y is defined as follows:
+
+- no parents: Y ~ N(mu, Sigma)
+- cts parents : Y|X=x ~ N(mu + W x, Sigma)
+- discrete parents: Y|Q=i ~ N(mu(:,i), Sigma(:,:,i))
+- cts and discrete parents: Y|X=x,Q=i ~ N(mu(:,i) + W(:,:,i) * x, Sigma(:,:,i))
+
+where N(mu, Sigma) denotes a Normal distribution with mean mu and
+covariance Sigma. Let |X|, |Y| and |Q| denote the sizes of X, Y and Q
+respectively.
+If there are no discrete parents, |Q|=1; if there is
+more than one, then |Q| = a vector of the sizes of each discrete parent.
+If there are no continuous parents, |X|=0; if there is more than one,
+then |X| = the sum of their sizes.
+Then mu is a |Y|*|Q| vector, Sigma is a |Y|*|Y|*|Q| positive
+semi-definite matrix, and W is a |Y|*|X|*|Q| regression (weight)
+matrix.
+
+We can create a Gaussian node with random parameters as follows.
+
+bnet.CPD{i} = gaussian_CPD(bnet, i);
+
+We can specify the value of one or more of the parameters as in the
+following example, in which |Y|=2, and |Q|=1.
+
+We will see an example of conditional linear Gaussian nodes below.
+
+When learning Gaussians from data, it is helpful to ensure the
+data has a small magnitude
+(see e.g., KPMstats/standardize) to prevent numerical problems.
+Unless you have a lot of data, it is also a very good idea to use
+diagonal instead of full covariance matrices.
+(BNT does not currently support spherical covariances, although it
+would be easy to add, since KPMstats/clg_Mstep supports this option;
+you would just need to modify gaussian_CPD/update_ess to accumulate
+weighted inner products.)
+
+
+
+
+
+Currently BNT does not support any CPDs for continuous nodes other
+than the Gaussian.
+However, you can use a mixture of Gaussians to
+approximate other continuous distributions. We will see an example
+of this with the IFA model below.
+
+
+
+
+We plan to add classification and regression trees to define CPDs for
+discrete and continuous nodes, respectively.
+Trees have many advantages: they are easy to interpret, they can do
+feature selection, they can
+handle discrete and continuous inputs, they do not make strong
+assumptions about the form of the distribution, the number of
+parameters can grow in a data-dependent way (i.e., they are
+semi-parametric), they can handle missing data, etc.
+However, they are not yet implemented.
+
+
+
+
+
+We list all the different types of CPDs supported by BNT.
+For each CPD, we specify if the child and parents can be discrete (D) or
+continuous (C) (Binary (B) nodes are a special case).
+We also specify which methods each class supports.
+If a method is inherited, the name of the parent class is mentioned.
+If a parent class calls a child method, this is mentioned.
+
+The CPD_to_CPT method converts a CPD to a table; this
+requires that the child and all parents are discrete.
+The CPT might be exponentially big...
+convert_to_table evaluates a CPD with evidence, and
+represents the resulting potential as an array.
+This requires that the child is discrete, and any continuous parents
+are observed.
+convert_to_pot evaluates a CPD with evidence, and
+represents the resulting potential as a dpot, gpot, cgpot or upot, as
+requested. (d=discrete, g=Gaussian, cg = conditional Gaussian, u =
+utility).
+
+
+When we sample a node, all the parents are observed.
+When we compute the (log) probability of a node, all the parents and
+the child are observed.
+
+We also specify if the parameters are learnable.
+For learning with EM, we require
+the methods reset_ess, update_ess and
+maximize_params.
+For learning from fully observed data, we require
+the method learn_params.
+By default, all classes inherit this from generic_CPD, which simply
+calls update_ess N times, once for each data case, followed
+by maximize_params, i.e., it is like EM, without the E step.
+Some classes implement a batch formula, which is quicker.
+
+Bayesian learning means computing a posterior over the parameters
+given fully observed data.
+
+Pearl means we implement the methods compute_pi and
+compute_lambda_msg, used by
+pearl_inf_engine, which runs on directed graphs.
+belprop_inf_engine only needs convert_to_pot.
+The pearl methods can exploit special properties of the CPDs for
+computing the messages efficiently, whereas belprop does not.
+
+The only method implemented by generic_CPD is adjustable_CPD,
+which is not shown, since it is not very interesting.
+
+
+
+
+In Figure (a) below, we show how Factor Analysis can be thought of as a
+graphical model. Here, X has an N(0,I) prior, and
+Y|X=x ~ N(mu + Wx, Psi),
+where Psi is diagonal and W is called the "factor loading matrix".
+Since the noise on both X and Y is diagonal, the components of these
+vectors are uncorrelated, and hence can be represented as individual
+scalar nodes, as we show in (b).
+(This is useful if parts of the observations on the Y vector are occasionally missing.)
+We usually take k=|X| << |Y|=D, so the model tries to explain
+many observations using a low-dimensional subspace.
+
+
+
+
+The root node is clamped to the N(0,I) distribution, so that we will
+not update these parameters during learning.
+The mean of the leaf node is clamped to 0,
+since we assume the data has been centered (had its mean subtracted
+off); this is just for simplicity.
+Finally, the covariance of the leaf node is constrained to be
+diagonal. W0 and Psi0 are the initial parameter guesses.
+
+
+We can fit this model (i.e., estimate its parameters in a maximum
+likelihood (ML) sense) using EM, as we
+explain below.
+Not surprisingly, the ML estimates for mu and Psi turn out to be
+identical to the
+sample mean and variance, which can be computed directly as
+
+mu_ML = mean(data);
+Psi_ML = diag(cov(data));
+
+Note that W can only be identified up to a rotation matrix, because of
+the spherical symmetry of the source.
+
+
+If we restrict Psi to be spherical, i.e., Psi = sigma*I,
+there is a closed-form solution for W as well,
+i.e., we do not need to use EM.
+In particular, W contains the first |X| eigenvectors of the sample covariance
+matrix, with scalings determined by the eigenvalues and sigma.
+Classical PCA can be obtained by taking the sigma->0 limit.
+For details, see
+
+
+By adding a hidden discrete variable, we can create mixtures of FA
+models, as shown in (c).
+Now we can explain the data using a set of subspaces.
+We can create this model in BNT as follows.
+
+Notice how the covariance matrix for Y is the same for all values of
+Q; that is, the noise level in each sub-space is assumed the same.
+However, we allow the offset, mu, to vary.
+For details, see
+
+I have included Zoubin's specialized MFA code (with his permission)
+with the toolbox, so you can check that BNT gives the same results:
+see 'BNT/examples/static/mfa1.m'.
+
+
+Independent Factor Analysis (IFA) generalizes FA by allowing a
+non-Gaussian prior on each component of X.
+(Note that we can approximate a non-Gaussian prior using a mixture of
+Gaussians.)
+This means that the likelihood function is no longer rotationally
+invariant, so we can uniquely identify W and the hidden
+sources X.
+IFA also allows a non-diagonal Psi (i.e. correlations between the components of Y).
+We recover classical Independent Components Analysis (ICA)
+in the Psi -> 0 limit, and by assuming that |X|=|Y|, so that the
+weight matrix W is square and invertible.
+For details, see
+
+
+As an example of the use of the softmax function,
+we introduce the Mixture of Experts model.
+
+As before,
+circles denote continuous-valued nodes,
+squares denote discrete nodes, clear
+means hidden, and shaded means observed.
+
+
+
+
+
+
+
+
+
+X is the observed
+input, Y is the output, and
+the Q nodes are hidden "gating" nodes, which select the appropriate
+set of parameters for Y. During training, Y is assumed observed,
+but for testing, the goal is to predict Y given X.
+Note that this is a conditional density model, so we don't
+associate any parameters with X.
+Hence X's CPD will be a root CPD, which is a way of modelling
+exogenous nodes.
+If the output is a continuous-valued quantity,
+we assume the "experts" are linear-regression units,
+and set Y's CPD to linear-Gaussian.
+If the output is discrete, we set Y's CPD to a softmax function.
+The Q CPDs will always be softmax functions.
+
+
+As a concrete example, consider the mixture of experts model where X and Y are
+scalars, and Q is binary.
+This is just piecewise linear regression, where
+we have two line segments, i.e.,
+
+
+
+We can create this model with random parameters as follows.
+(This code is bundled in BNT/examples/static/mixexp2.m.)
+
+This is what the model looks like before training.
+(Thanks to Thomas Hofman for writing this plotting routine.)
+
+
+
+
+
+Now let's train the model, and plot the final performance.
+(We will discuss how to train models in more detail below.)
+
+
+ncases = size(data, 1); % each row of data is a training case
+cases = cell(3, ncases);
+cases([1 3], :) = num2cell(data'); % each column of cases is a training case
+engine = jtree_inf_engine(bnet);
+max_iter = 20;
+[bnet2, LLtrace] = learn_params_em(engine, cases, max_iter);
+
+(We specify which nodes will be observed when we create the engine.
+Hence BNT knows that the hidden nodes are all discrete.
+For complex models, this can lead to a significant speedup.)
+Below we show what the model looks like after 16 iterations of EM
+(with 100 IRLS iterations per M step), when it converged
+using the default convergence tolerance (that the
+fractional change in the log-likelihood be less than 1e-3).
+Before learning, the log-likelihood was
+-322.927442; afterwards, it was -13.728778.
+
+
+
+
+(See BNT/examples/static/mixexp2.m for details of the code.)
+
+
+
+
+
+A hierarchical mixture of experts (HME) extends the mixture of experts
+model by having more than one hidden node. A two-level example is shown below, along
+with its more traditional representation as a neural network.
+This is like a (balanced) probabilistic decision tree of height 2.
+
+
+
+
+
+Pierpaolo Brutti
+has written an extensive set of routines for HMEs,
+which are bundled with BNT: see the examples/static/HME directory.
+These routines allow you to choose the number of hidden (gating)
+layers, and the form of the experts (softmax or MLP).
+See the file hmemenu, which provides a demo.
+For example, the figure below shows the decision boundaries learned
+for a ternary classification problem, using a 2 level HME with softmax
+gates and softmax experts; the training set is on the left, the
+testing set on the right.
+
"Generalized Linear Models", McCullagh and Nelder, Chapman and
+Hall, 1983.
+
+
+"Improved learning algorithms for mixtures of experts in multiclass
+classification".
+K. Chen, L. Xu, H. Chi.
+Neural Networks (1999) 12: 1229-1252.
+
+
+
+Bayes nets originally arose out of an attempt to add probabilities to
+expert systems, and this is still the most common use for BNs.
+A famous example is
+QMR-DT, a decision-theoretic reformulation of the Quick Medical
+Reference (QMR) model.
+
+
+
+
+Here, the top layer represents hidden disease nodes, and the bottom
+layer represents observed symptom nodes.
+The goal is to infer the posterior probability of each disease given
+all the symptoms (which can be present, absent or unknown).
+Each node in the top layer has a Bernoulli prior (with a low prior
+probability that the disease is present).
+Since each node in the bottom layer has a high fan-in, we use a
+noisy-OR parameterization; each disease has an independent chance of
+causing each symptom.
+The real QMR-DT model is copyright, but
+we can create a random QMR-like model as follows.
+
+function bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+% MK_QMR_BNET Make a QMR model
+% bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+%
+% G(i,j) = 1 iff there is an arc from disease i to finding j
+% inhibit(i,j) = inhibition probability on i->j arc
+% leak(j) = inhibition prob. on leak->j arc
+% prior(i) = prob. disease i is on
+
+[Ndiseases Nfindings] = size(inhibit);
+N = Ndiseases + Nfindings;
+finding_node = Ndiseases+1:N;
+ns = 2*ones(1,N);
+dag = zeros(N,N);
+dag(1:Ndiseases, finding_node) = G;
+bnet = mk_bnet(dag, ns, 'observed', finding_node);
+
+for d=1:Ndiseases
+ CPT = [1-prior(d) prior(d)];
+ bnet.CPD{d} = tabular_CPD(bnet, d, CPT');
+end
+
+for i=1:Nfindings
+ fnode = finding_node(i);
+ ps = parents(G, i);
+ bnet.CPD{fnode} = noisyor_CPD(bnet, fnode, leak(i), inhibit(ps, i));
+end
+
+In the file BNT/examples/static/qmr1, we create a random bipartite
+graph G, with 5 diseases and 10 findings, and random parameters.
+(In general, to create a random dag, use 'mk_random_dag'.)
+We can visualize the resulting graph structure using
+the methods discussed below, with the
+following results:
+
+
+
+
+Now let us put some random evidence on all the leaves except the very
+first and very last, and compute the disease posteriors.
+
+Junction tree can be quite slow on large QMR models.
+Fortunately, it is possible to exploit properties of the noisy-OR
+function to speed up exact inference using an algorithm called
+quickscore, discussed below.
+
+
+
+
+
+
+
+A conditional Gaussian model is one in which, conditioned on all the discrete
+nodes, the distribution over the remaining (continuous) nodes is
+multivariate Gaussian. This means we can have arcs from discrete (D)
+to continuous (C) nodes, but not vice versa.
+(We are allowed C->D arcs if the continuous nodes are observed,
+as in the mixture of experts model,
+since this distribution can be represented with a discrete potential.)
+
+We now give an example of a CG model, from
+the paper "Propagation of Probabilities, Means and
+Variances in Mixed Graphical Association Models", Steffen Lauritzen,
+JASA 87(420):1098--1108, 1992 (reprinted in the book "Probabilistic Networks and Expert
+Systems", R. G. Cowell, A. P. Dawid, S. L. Lauritzen and
+D. J. Spiegelhalter, Springer, 1999.)
+
+
Specifying the graph
+
+Consider the model of waste emissions from an incinerator plant shown below.
+We follow the standard convention that shaded nodes are observed,
+clear nodes are hidden.
+We also use the non-standard convention that
+square nodes are discrete (tabular) and round nodes are
+Gaussian.
+
+
+
+
+
+
+
+We can create this model as follows.
+
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+
+dag = zeros(n);
+dag(F,E)=1;
+dag(W,[E Min D]) = 1;
+dag(E,D)=1;
+dag(B,[C D])=1;
+dag(D,[L Mout])=1;
+dag(Min,Mout)=1;
+
+% node sizes - all cts nodes are scalar, all discrete nodes are binary
+ns = ones(1, n);
+dnodes = [F W B];
+cnodes = mysetdiff(1:n, dnodes);
+ns(dnodes) = 2;
+
+bnet = mk_bnet(dag, ns, 'discrete', dnodes);
+
+'dnodes' is a list of the discrete nodes; 'cnodes' is the continuous
+nodes. 'mysetdiff' is a faster version of the built-in 'setdiff'.
+
+
+
+
Specifying the parameters
+
+The parameters of the discrete nodes can be specified as follows.
+
+
+'marg' is a structure that contains the fields 'mu' and 'Sigma', which
+contain the mean and (co)variance of the marginal on E.
+In this case, they are both scalars.
+Let us check they match the published figures (to 2 decimal places).
+
+
+It is easy to visualize this posterior using standard Matlab plotting
+functions, e.g.,
+
+gaussplot2d(marg.mu, marg.Sigma);
+
+produces the following picture.
+
+
+
+
+
+
+
+
+The T field indicates that the mixing weight of this Gaussian
+component is 1.0.
+If the joint contains discrete and continuous variables, the result
+will be a mixture of Gaussians, e.g.,
+
+The interpretation is
+Sigma(i,j,k) = Cov[ E(i) E(j) | F=k ].
+In this case, E is a scalar, so i=j=1; k specifies the mixture component.
+
+We saw in the sprinkler network that BNT sets the effective size of
+observed discrete nodes to 1, since they only have one legal value.
+For continuous nodes, BNT sets their length to 0,
+since they have been reduced to a point.
+For example,
+
+It is simple to post-process the output of marginal_nodes.
+For example, the file BNT/examples/static/cg1 sets the mu term of
+observed nodes to their observed value, and the Sigma term to 0 (since
+observed nodes have no variance).
+
+
+Note that the implemented version of the junction tree is numerically
+unstable when using CG potentials
+(which is why, in the example above, we only required our answers to agree with
+the published ones to 2dp.)
+This is why you might want to use stab_cond_gauss_inf_engine,
+implemented by Shan Huang. This is described in
+
+
+
"Stable Local Computation with Conditional Gaussian Distributions",
+S. Lauritzen and F. Jensen, Tech Report R-99-2014,
+Dept. Math. Sciences, Allborg Univ., 1999.
+
+
+However, even the numerically stable version
+can be computationally intractable if there are many hidden discrete
+nodes, because the number of mixture components grows exponentially e.g., in a
+switching linear dynamical system.
+In general, one must resort to approximate inference techniques: see
+the discussion on inference engines below.
+
+
+
+
+The parameter estimation routines in BNT can be classified into 4
+types, depending on whether the goal is to compute
+a full (Bayesian) posterior over the parameters or just a point
+estimate (e.g., Maximum Likelihood or Maximum A Posteriori),
+and whether all the variables are fully observed or there is missing
+data/ hidden variables (partial observability).
+
+
+To load numeric data from an ASCII text file called 'dat.txt', where each row is a
+case and columns are separated by white-space, such as
+
+011979 1626.5 0.0
+021979 1367.0 0.0
+...
+
+you can use
+
+data = load('dat.txt');
+
+or
+
+load dat.txt -ascii
+
+In the latter case, the data is stored in a variable called 'dat' (the
+filename minus the extension).
+Alternatively, suppose the data is stored in a .csv file (has commas
+separating the columns, and contains a header line), such as
+
+header info goes here
+ORD,011979,1626.5,0.0
+DSM,021979,1367.0,0.0
+...
+
+If your file is not in either of these formats, you can either use Perl to convert
+it to this format, or use the Matlab scanf command.
+Type
+
+help iofun
+
+for more information on Matlab's file functions.
+
+
+BNT learning routines require data to be stored in a cell array.
+data{i,m} is the value of node i in case (example) m, i.e., each
+column is a case.
+If node i is not observed in case m (missing value), set
+data{i,m} = [].
+(Not all the learning routines can cope with such missing values, however.)
+In the special case that all the nodes are observed and are
+scalar-valued (as opposed to vector-valued), the data can be
+stored in a matrix (as opposed to a cell-array).
+
+Suppose, as in the mixture of experts example,
+that we have 3 nodes in the graph: X(1) is the observed input, X(3) is
+the observed output, and X(2) is a hidden (gating) node. We can
+create the dataset as follows.
+
+
+As an example, let's generate some data from the sprinkler network, randomize the parameters,
+and then try to recover the original model.
+First we create some training data using forwards sampling.
+
+samples{j,i} contains the value of the j'th node in case i.
+sample_bnet returns a cell array because, in general, each node might
+be a vector of different length.
+In this case, all nodes are discrete (and hence scalars), so we
+could have used a regular array instead (which can be quicker):
+
+data = cell2num(samples);
+
+Now we create a network with random parameters.
+(The initial values of bnet2 don't matter in this case, since we can find the
+globally optimal MLE independent of where we start.)
+
+% Make a tabula rasa
+bnet2 = mk_bnet(dag, node_sizes);
+seed = 0;
+rand('state', seed);
+bnet2.CPD{C} = tabular_CPD(bnet2, C);
+bnet2.CPD{R} = tabular_CPD(bnet2, R);
+bnet2.CPD{S} = tabular_CPD(bnet2, S);
+bnet2.CPD{W} = tabular_CPD(bnet2, W);
+
+Finally, we find the maximum likelihood estimates of the parameters.
+
+bnet3 = learn_params(bnet2, samples);
+
+To view the learned parameters, we use a little Matlab hackery.
+
+
+Currently, only tabular CPDs can have priors on their parameters.
+The conjugate prior for a multinomial is the Dirichlet.
+(For binary random variables, the multinomial is the same as the
+Bernoulli, and the Dirichlet is the same as the Beta.)
+
+The Dirichlet has a simple interpretation in terms of pseudo counts.
+If we let N_ijk = the num. times X_i=k and Pa_i=j occurs in the
+training set, where Pa_i are the parents of X_i,
+then the maximum likelihood (ML) estimate is
+T_ijk = N_ijk / N_ij (where N_ij = sum_k' N_ijk'), which will be 0 if N_ijk=0.
+To prevent us from declaring that (X_i=k, Pa_i=j) is impossible just because this
+event was not seen in the training set,
+we can pretend we saw value k of X_i, for each value j of Pa_i some number (alpha_ijk)
+of times in the past.
+The MAP (maximum a posteriori) estimate is then
+
+and is never 0 if all alpha_ijk > 0.
+For example, consider the network A->B, where A is binary and B has 3
+values.
+A uniform prior for B has the form
+
+ B=1 B=2 B=3
+A=1 1 1 1
+A=2 1 1 1
+
+which can be created using
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'unif');
+
+This prior does not satisfy the likelihood equivalence principle,
+which says that Markov equivalent models
+should have the same marginal likelihood.
+A prior that does satisfy this principle is shown below.
+Heckerman (1995) calls this the
+BDeu prior (likelihood equivalent uniform Bayesian Dirichlet).
+
+ B=1 B=2 B=3
+A=1 1/6 1/6 1/6
+A=2 1/6 1/6 1/6
+
+where we put N/(q*r) in each bin; N is the equivalent sample size,
+r=|A|, q = |B|.
+This can be created as follows
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'BDeu');
+
+Here, 1 is the equivalent sample size, and is the strength of the
+prior.
+You can change this using
+
+bnet.CPD{i}.prior contains the new Dirichlet pseudocounts,
+and bnet.CPD{i}.CPT is set to the mean of the posterior (the
+normalized counts).
+(Hence if the initial pseudo counts are 0,
+bayes_update_params and learn_params will give the
+same result.)
+
+
+
+
+
+We can compute the same result sequentially (on-line) as follows.
+
+
+The file BNT/examples/static/StructLearn/model_select1 has an example of
+sequential model selection which uses the same idea.
+We generate data from the model A->B
+and compute the posterior prob of all 3 dags on 2 nodes:
+ (1) A B, (2) A <- B , (3) A -> B
+Models 2 and 3 are Markov equivalent, and therefore indistinguishable from
+observational data alone, so we expect their posteriors to be the same
+(assuming a prior which satisfies likelihood equivalence).
+If we use random parameters, the "true" model only gets a higher posterior after 2000 trials!
+However, if we make B a noisy NOT gate, the true model "wins" after 12
+trials, as shown below (red = model 1, blue/green (superimposed)
+represents models 2/3).
+
+
+
+The use of marginal likelihood for model selection is discussed in
+greater detail in the
+section on structure learning.
+
+
+
+
+
+samples2{i,l} is the value of node i in training case l, or [] if unobserved.
+
+Now we will compute the MLEs using the EM algorithm.
+We need to use an inference algorithm to compute the expected
+sufficient statistics in the E step; the M (maximization) step is as
+above.
+
+
+In networks with repeated structure (e.g., chains and grids), it is
+common to assume that the parameters are the same at every node. This
+is called parameter tying, and reduces the amount of data needed for
+learning.
+
+When we have tied parameters, there is no longer a one-to-one
+correspondence between nodes and CPDs.
+Rather, each CPD specifies the parameters for a whole equivalence class
+of nodes.
+It is easiest to see this by example.
+Consider the following hidden Markov
+model (HMM)
+
+
+
+
+When HMMs are used for semi-infinite processes like speech recognition,
+we assume the transition matrix
+P(H(t+1)|H(t)) is the same for all t; this is called a time-invariant
+or homogeneous Markov chain.
+Hence hidden nodes 2, 3, ..., T
+are all in the same equivalence class, say class Hclass.
+Similarly, the observation matrix P(O(t)|H(t)) is assumed to be the
+same for all t, so the observed nodes are all in the same equivalence
+class, say class Oclass.
+Finally, the prior term P(H(1)) is in a class all by itself, say class
+H1class.
+This is illustrated below, where we explicitly represent the
+parameters as random variables (dotted nodes).
+
+
+
+In BNT, we cannot represent parameters as random variables (nodes).
+Instead, we "hide" the
+parameters inside one CPD for each equivalence class,
+and then specify that the other CPDs should share these parameters, as
+follows.
+
+hnodes = 1:2:2*T;
+onodes = 2:2:2*T;
+H1class = 1; Hclass = 2; Oclass = 3;
+eclass = ones(1,N);
+eclass(hnodes(2:end)) = Hclass;
+eclass(hnodes(1)) = H1class;
+eclass(onodes) = Oclass;
+% create dag and ns in the usual way
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'equiv_class', eclass);
+
+Finally, we define the parameters for each equivalence class:
+
+In general, if bnet.CPD{e} = xxx_CPD(bnet, j), then j should be a
+member of e's equivalence class; that is, it is not always the case
+that e == j. You can use bnet.rep_of_eclass(e) to return the
+representative of equivalence class e.
+BNT will look up the parents of j to determine the size
+of the CPT to use. It assumes that this is the same for all members of
+the equivalence class.
+Click here for
+a more complex example of parameter tying.
+
+Note:
+Normally one would define an HMM as a
+Dynamic Bayes Net
+(see the function BNT/examples/dynamic/mk_chmm.m).
+However, one can define an HMM as a static BN using the function
+BNT/examples/static/Models/mk_hmm_bnet.m.
+
+
+
+
+
+Update (9/29/03):
+Philippe Leray is developing some additional structure learning code
+on top of BNT. Click here
+for details.
+
+
+
+There are two very different approaches to structure learning:
+constraint-based and search-and-score.
+In the constraint-based approach,
+we start with a fully connected graph, and remove edges if certain
+conditional independencies are measured in the data.
+This has the disadvantage that repeated independence tests lose
+statistical power.
+
+In the more popular search-and-score approach,
+we perform a search through the space of possible DAGs, and either
+return the best one found (a point estimate), or return a sample of the
+models found (an approximation to the Bayesian posterior).
+
+Unfortunately, the number of DAGs as a function of the number of
+nodes, G(n), is super-exponential in n.
+A closed form formula for G(n) is not known, but the first few values
+are shown below (from Cooper, 1999).
+
+
+
n
G(n)
+
1
1
+
2
3
+
3
25
+
4
543
+
5
29,281
+
6
3,781,503
+
7
1.1 x 10^9
+
8
7.8 x 10^11
+
9
1.2 x 10^15
+
10
4.2 x 10^18
+
+
+Since the number of DAGs is super-exponential in the number of nodes,
+we cannot exhaustively search the space, so we either use a local
+search algorithm (e.g., greedy hill climbing, perhaps with multiple
+restarts) or a global search algorithm (e.g., Markov Chain Monte
+Carlo).
+
+If we know a total ordering on the nodes,
+finding the best structure amounts to picking the best set of parents
+for each node independently.
+This is what the K2 algorithm does.
+If the ordering is unknown, we can search over orderings,
+which is more efficient than searching over DAGs (Koller and Friedman, 2000).
+
+In addition to the search procedure, we must specify the scoring
+function. There are two popular choices. The Bayesian score integrates
+out the parameters, i.e., it is the marginal likelihood of the model.
+The BIC (Bayesian Information Criterion) is defined as
+log P(D|theta_hat) - 0.5*d*log(N), where D is the data, theta_hat is
+the ML estimate of the parameters, d is the number of parameters, and
+N is the number of data cases.
+The BIC method has the advantage of not requiring a prior.
+
+BIC can be derived as a large sample
+approximation to the marginal likelihood.
+(It is also equal to the Minimum Description Length of a model.)
+However, in practice, the sample size does not need to be very large
+for the approximation to be good.
+For example, in the figure below, we plot the ratio between the log marginal likelihood
+and the BIC score against data-set size; we see that the ratio rapidly
+approaches 1, especially for non-informative priors.
+(This plot was generated by the file BNT/examples/static/bic1.m. It
+uses the water sprinkler BN with BDeu Dirichlet priors with different
+equivalent sample sizes.)
+
+
+
+
+
+
+
+
+As with parameter learning, handling missing data/ hidden variables is
+much harder than the fully observed case.
+The structure learning routines in BNT can therefore be classified into 4
+types, analogously to the parameter learning case.
+
+
+If two DAGs encode the same conditional independencies, they are
+called Markov equivalent. The set of all DAGs can be partitioned into
+Markov equivalence classes. Graphs within the same class can
+have
+the direction of some of their arcs reversed without changing any of
+the CI relationships.
+Each class can be represented by a PDAG
+(partially directed acyclic graph) called an essential graph or
+pattern. This specifies which edges must be oriented in a certain
+direction, and which may be reversed.
+
+
+When learning graph structure from observational data,
+the best one can hope to do is to identify the model up to Markov
+equivalence. To distinguish amongst graphs within the same equivalence
+class, one needs interventional data: see the discussion on active learning below.
+
+
+
+
+
+The brute-force approach to structure learning is to enumerate all
+possible DAGs, and score each one. This provides a "gold standard"
+with which to compare other algorithms. We can do this as follows.
+
+where data(i,m) is the value of node i in case m,
+and ns(i) is the size of node i.
+If the DAGs have a lot of families in common, we can cache the sufficient statistics,
+making this potentially more efficient than scoring the DAGs one at a time.
+(Caching is not currently implemented, however.)
+
+By default, we use the Bayesian scoring metric, and assume CPDs are
+represented by tables with BDeu(1) priors.
+We can override these defaults as follows.
+If we want to use uniform priors, we can say
+
+params{i} is a cell-array, containing optional arguments that are
+passed to the constructor for CPD i.
+
+Now suppose we want to use different node types, e.g.,
+Suppose nodes 1 and 2 are Gaussian, and nodes 3 and 4 softmax (both
+these CPDs can support discrete and continuous parents, which is
+necessary since all other nodes will be considered as parents).
+The Bayesian scoring metric currently only works for tabular CPDs, so
+we will use BIC:
+
+In practice, one can't enumerate all possible DAGs for N > 5,
+but one can evaluate any reasonably-sized set of hypotheses in this
+way (e.g., nearest neighbors of your current best guess).
+Think of this as "computer assisted model refinement" as opposed to de
+novo learning.
+
+
+
+
+The K2 algorithm (Cooper and Herskovits, 1992) is a greedy search algorithm that works as follows.
+Initially each node has no parents. It then adds incrementally that parent whose addition most
+increases the score of the resulting structure. When the addition of no single
+parent can increase the score, it stops adding parents to the node.
+Since we are using a fixed ordering, we do not need to check for
+cycles, and can choose the parents for each node independently.
+
+The original paper used the Bayesian scoring
+metric with tabular CPDs and Dirichlet priors.
+BNT generalizes this to allow any kind of CPD, and either the Bayesian
+scoring metric or BIC, as in the example above.
+In addition, you can specify
+an optional upper bound on the number of parents for each node.
+The file BNT/examples/static/k2demo1.m gives an example of how to use K2.
+We use the water sprinkler network and sample 100 cases from it as before.
+Then we see how much data it takes to recover the generating structure:
+
+So we see it takes about sz(10)=50 cases. (BIC behaves similarly,
+showing that the prior doesn't matter too much.)
+In general, we cannot hope to recover the "true" generating structure,
+only one that is in its Markov equivalence
+class.
+
+
+
+
+Hill-climbing starts at a specific point in space,
+considers all nearest neighbors, and moves to the neighbor
+that has the highest score; if no neighbors have higher
+score than the current point (i.e., we have reached a local maximum),
+the algorithm stops. One can then restart in another part of the space.
+
+A common definition of "neighbor" is all graphs that can be
+generated from the current graph by adding, deleting or reversing a
+single arc, subject to the acyclicity constraint.
+Other neighborhoods are possible: see
+
+Optimal Structure Identification with Greedy Search, Max
+Chickering, JMLR 2002.
+
+
+
+
+
+
+We can use a Markov Chain Monte Carlo (MCMC) algorithm called
+Metropolis-Hastings (MH) to search the space of all
+DAGs.
+The standard proposal distribution is to consider moving to all
+nearest neighbors in the sense defined above.
+
+The function can be called
+as in the following example.
+
+We can also plot the acceptance ratio versus number of MCMC steps,
+as a crude convergence diagnostic.
+
+clf
+plot(accept_ratio)
+
+
+
+Even though the number of samples needed by MCMC is theoretically
+polynomial (not exponential) in the dimensionality of the search space, in practice it has been
+found that MCMC does not converge in reasonable time for graphs with
+more than about 10 nodes.
+
+
+
+
+
+
+As was mentioned above,
+one can only learn a DAG up to Markov equivalence, even given infinite data.
+If one is interested in learning the structure of a causal network,
+one needs interventional data.
+(By "intervention" we mean forcing a node to take on a specific value,
+thereby effectively severing its incoming arcs.)
+
+Most of the scoring functions accept an optional argument
+that specifies whether a node was observed to have a certain value, or
+was forced to have that value: we set clamped(i,m)=1 if node i was
+forced in training case m. e.g., see the file
+BNT/examples/static/cooper_yoo.
+
+An interesting question is to decide which interventions to perform
+(c.f., design of experiments). For details, see the following tech
+report
+
+
+Computing the Bayesian score when there is partial observability is
+computationally challenging, because the parameter posterior becomes
+multimodal (the hidden nodes induce a mixture distribution).
+One therefore needs to use approximations such as BIC.
+Unfortunately, search algorithms are still expensive, because we need
+to run EM at each step to compute the MLE, which is needed to compute
+the score of each model. An alternative approach is
+to do the local search steps inside of the M step of EM, which is more
+efficient since the data has been "filled in" - this is
+called the structural EM algorithm (Friedman 1997), and provably
+converges to a local maximum of the BIC score.
+
+Wei Hu has implemented SEM for discrete nodes.
+You can download his package from
+here.
+Please address all questions about this code to
+wei.hu@intel.com.
+See also Phl's implementation of SEM.
+
+
+
+
+
+
+You can visualize an arbitrary graph (such as one learned using the
+structure learning routines) with Matlab code contributed by
+Ali
+Taylan Cemgil
+from the University of Nijmegen.
+For static BNs, call it as follows:
+
+If you install the excellent graphviz, an
+open-source graph visualization package from AT&T,
+you can create a much better visualization as follows
+
+graph_to_dot(bnet.dag)
+
+This works by converting the adjacency matrix to a file suitable
+for input to graphviz (using the dot format),
+then converting the output of graphviz to postscript, and displaying the results using
+ghostview.
+You can do each of these steps separately for more control, as shown
+below.
+
+
+The IC algorithm (Pearl and Verma, 1991),
+and the faster, but otherwise equivalent, PC algorithm (Spirtes, Glymour, and Scheines 1993),
+computes many conditional independence tests,
+and combines these constraints into a
+PDAG to represent the whole
+Markov equivalence class.
+
+IC*/FCI extend IC/PC to handle latent variables: see below.
+(IC stands for inductive causation; PC stands for Peter and Clark,
+the first names of Spirtes and Glymour; FCI stands for fast causal
+inference.
+What we, following Pearl (2000), call IC* was called
+IC in the original Pearl and Verma paper.)
+For details, see
+
+
+The PC algorithm takes as arguments a function f, the number of nodes N,
+the maximum fan in K, and additional arguments A which are passed to f.
+The function f(X,Y,S,A) returns 1 if X is conditionally independent of Y given S, and 0
+otherwise.
+For example, suppose we cheat by
+passing in a CI "oracle" which has access to the true DAG; the oracle
+tests for d-separation in this DAG, i.e.,
+f(X,Y,S) calls dsep(X,Y,S,dag). We can do this as follows.
+
+pdag = learn_struct_pdag_pc('dsep', N, max_fan_in, dag);
+
+pdag(i,j) = -1 if there is definitely an i->j arc,
+and pdag(i,j) = 1 if there is either an i->j or an i<-j arc.
+
+So as expected, we see that the V-structure at the W node is uniquely identified,
+but the other arcs have ambiguous orientation.
+
+We now give an example from p141 (1st edn) / p103 (2nd edn) of the SGS
+book.
+This example concerns the female orgasm.
+We are given a correlation matrix C between 7 measured factors (such
+as subjective experiences of coital and masturbatory experiences),
+derived from 281 samples, and want to learn a causal model of the
+data. We will not discuss the merits of this type of work here, but
+merely show how to reproduce the results in the SGS book.
+Their program,
+Tetrad,
+makes use of the Fisher Z-test for conditional
+independence, so we do the same:
+
+The results match those of Fig 12a of SGS apart from two edge
+differences; presumably this is due to rounding error (although it
+could be a bug, either in BNT or in Tetrad).
+This example can be found in the file BNT/examples/static/pc2.m.
+
+
+
+The IC* algorithm (Pearl and Verma, 1991),
+and the faster FCI algorithm (Spirtes, Glymour, and Scheines 1993),
+are like the IC/PC algorithm, except that they can detect the presence
+of latent variables.
+See the file learn_struct_pdag_ic_star written by Tamar
+Kushnir. The output is a matrix P, defined as follows
+(see Pearl (2000), p52 for details):
+
+% P(i,j) = -1 if there is either a latent variable L such that i <-L->j OR there is a directed edge from i->j.
+% P(i,j) = -2 if there is a marked directed i-*>j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i--j
+% P(i,j) = P(j,i) = 2 if there is a latent variable L such that i<-L->j.
+
+
+Up until now, we have used the junction tree algorithm for inference.
+However, sometimes this is too slow, or not even applicable.
+In general, there are many inference algorithms each of which make
+different tradeoffs between speed, accuracy, complexity and
+generality. Furthermore, there might be many implementations of the
+same algorithm; for instance, a general purpose, readable version,
+and a highly-optimized, specialized one.
+To cope with this variety, we treat each inference algorithm as an
+object, which we call an inference engine.
+
+
+An inference engine is an object that contains a bnet and supports the
+'enter_evidence' and 'marginal_nodes' methods. The engine constructor
+takes the bnet as argument and may do some model-specific processing.
+When 'enter_evidence' is called, the engine may do some
+evidence-specific processing. Finally, when 'marginal_nodes' is
+called, the engine may do some query-specific processing.
+
+
+The amount of work done when each stage is specified -- structure,
+parameters, evidence, and query -- depends on the engine. The cost of
+work done early in this sequence can be amortized. On the other hand,
+one can make better optimizations if one waits until later in the
+sequence.
+For example, the parameters might imply
+conditional independencies that are not evident in the graph structure,
+but can nevertheless be exploited; the evidence indicates which nodes
+are observed and hence can effectively be disconnected from the
+graph; and the query might indicate that large parts of the network
+are d-separated from the query nodes. (Since it is not the actual
+values of the evidence that matters, just which nodes are observed,
+many engines allow you to specify which nodes will be observed when they are constructed,
+i.e., before calling 'enter_evidence'. Some engines can still cope if
+the actual pattern of evidence is different, e.g., if there is missing
+data.)
+
+
+Although being maximally lazy (i.e., only doing work when a query is
+issued) may seem desirable,
+this is not always the most efficient.
+For example,
+when learning using EM, we need to call marginal_nodes N times, where N is the
+number of nodes. Variable elimination would end
+up repeating a lot of work
+each time marginal_nodes is called, making it inefficient for
+learning. The junction tree algorithm, by contrast, uses dynamic
+programming to avoid this redundant computation --- it calculates all
+marginals in two passes during 'enter_evidence', so calling
+'marginal_nodes' takes constant time.
+
+We will discuss some of the inference algorithms implemented in BNT
+below, and finish with a summary of all
+of them.
+
+
+
+
+
+
+
+
+
+The variable elimination algorithm, also known as bucket elimination
+or peeling, is one of the simplest inference algorithms.
+The basic idea is to "push sums inside of products"; this is explained
+in more detail
+here.
+
+The principle of distributing sums over products can be generalized
+greatly to apply to any commutative semiring.
+This forms the basis of many common algorithms, such as Viterbi
+decoding and the Fast Fourier Transform. For details, see
+
+
+
R. McEliece and S. M. Aji, 2000.
+
+
+The Generalized Distributive Law,
+IEEE Trans. Inform. Theory, vol. 46, no. 2 (March 2000),
+pp. 325--343.
+
+
+
+Choosing an order in which to sum out the variables so as to minimize
+computational cost is known to be NP-hard.
+The implementation of this algorithm in
+var_elim_inf_engine makes no attempt to optimize this
+ordering (in contrast, say, to jtree_inf_engine, which uses a
+greedy search procedure to find a good ordering).
+
+Note: unlike most algorithms, var_elim does all its computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+
+
+The simplest inference algorithm of all is to explicitly construct
+the joint distribution over all the nodes, and then to marginalize it.
+This is implemented in global_joint_inf_engine.
+Since the size of the joint is exponential in the
+number of discrete (hidden) nodes, this is not a very practical algorithm.
+It is included merely for pedagogical and debugging purposes.
+
+Three specialized versions of this algorithm have also been implemented,
+corresponding to the cases where all the nodes are discrete (D), all
+are Gaussian (G), and some are discrete and some Gaussian (CG).
+They are called enumerative_inf_engine,
+gaussian_inf_engine,
+and cond_gauss_inf_engine respectively.
+
+Note: unlike most algorithms, these global inference algorithms do all their computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+The junction tree algorithm is quite slow on the QMR network,
+since the cliques are so big.
+One simple trick we can use is to notice that hidden leaves do not
+affect the posteriors on the roots, and hence do not need to be
+included in the network.
+A second trick is to notice that the negative findings can be
+"absorbed" into the prior:
+see the file
+BNT/examples/static/mk_minimal_qmr_bnet for details.
+
+
+A much more significant speedup is obtained by exploiting special
+properties of the noisy-or node, as done by the quickscore
+algorithm. For details, see
+
+
Heckerman, "A tractable inference algorithm for diagnosing multiple diseases", UAI 89.
+
Rish and Dechter, "On the impact of causal independence", UCI
+tech report, 1998.
+
+
+This has been implemented in BNT as a special-purpose inference
+engine, which can be created and used as follows:
+
+
+Even using quickscore, exact inference takes time that is exponential
+in the number of positive findings.
+Hence for large networks we need to resort to approximate inference techniques.
+See for example
+
+
T. Jaakkola and M. Jordan, "Variational probabilistic inference and the
+QMR-DT network", JAIR 10, 1999.
+
+
K. Murphy, Y. Weiss and M. Jordan, "Loopy belief propagation for approximate inference: an empirical study",
+ UAI 99.
+
+The latter approximation
+entails applying Pearl's belief propagation algorithm to a model even
+if it has loops (hence the name loopy belief propagation).
+Pearl's algorithm, implemented as pearl_inf_engine, gives
+exact results when applied to singly-connected graphs
+(a.k.a. polytrees, since
+the underlying undirected topology is a tree, but a node may have
+multiple parents).
+To apply this algorithm to a graph with loops,
+use pearl_inf_engine.
+This can use a centralized or distributed message passing protocol.
+You can use it as in the following example.
+
+We found that this algorithm often converges, and when it does, often
+is very accurate, but it depends on the precise setting of the
+parameter values of the network.
+(See the file BNT/examples/static/qmr1 to repeat the experiment for yourself.)
+Understanding when and why belief propagation converges/ works
+is a topic of ongoing research.
+
+pearl_inf_engine can exploit special structure in noisy-or
+and gmux nodes to compute messages efficiently.
+
+belprop_inf_engine is like pearl, but uses potentials to
+represent messages. Hence this is slower.
+
+belprop_fg_inf_engine is like belprop,
+but is designed for factor graphs.
+
+
+
+
+
+BNT now (Mar '02) has two sampling (Monte Carlo) inference algorithms:
+
+
likelihood_weighting_inf_engine which does importance
+sampling and can handle any node type.
+
gibbs_sampling_inf_engine, written by Bhaskara Marthi.
+Currently this can only handle tabular CPDs.
+For a much faster and more powerful Gibbs sampling program, see
+BUGS.
+
+Note: To generate samples from a network (which is not the same as inference!),
+use sample_bnet.
+
+
+
+
+
+
+The inference engines differ in many ways. Here are
+some of the major "axes":
+
+
Works for all topologies or makes restrictions?
+
Works for all node types or makes restrictions?
+
Exact or approximate inference?
+
+
+
+In terms of topology, most engines handle any kind of DAG.
+belprop_fg does approximate inference on factor graphs (FG), which
+can be used to represent directed, undirected, and mixed (chain)
+graphs.
+(In the future, we plan to support exact inference on chain graphs.)
+quickscore only works on QMR-like models.
+
+In terms of node types: algorithms that use potentials can handle
+discrete (D), Gaussian (G) or conditional Gaussian (CG) models.
+Sampling algorithms can essentially handle any kind of node (distribution).
+Other algorithms make more restrictive assumptions in exchange for
+speed.
+
+Finally, most algorithms are designed to give the exact answer.
+The belief propagation algorithms are exact if applied to trees, and
+in some other cases.
+Sampling is considered approximate, even though, in the limit of an
+infinite number of samples, it gives the exact answer.
+
+
+
+Here is a summary of the properties
+of all the engines in BNT which work on static networks.
+
+LIMIDs explicitly show all information arcs, rather than implicitly
+assuming no forgetting. This allows them to model forgetful
+controllers.
+
+See the examples in BNT/examples/limids for details.
+
+
+
+
+
DBNs, HMMs, Kalman filters and all that
+
+Click here for documentation about how to
+use BNT for dynamical systems and sequence data.
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/usage_cropped.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/usage_cropped.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3232 @@
+
+How to use the Bayes Net Toolbox
+
+
+
+
+
+
How to use the Bayes Net Toolbox
+
+This documentation was last updated on 13 November 2002.
+
+Click here for a list of changes made to
+BNT.
+
+Click
+here
+for a French version of this documentation (which might not
+be up-to-date).
+
+
+
Unpack the file. In Unix, type
+
+"unzip FullBNT.zip".
+In Windows, use
+a program like Winzip. This will
+create a directory called FullBNT, which contains BNT and other libraries.
+
+
+
Read the file BNT/README to make sure the date
+matches the one on the top of the BNT home page.
+If not, you may need to press 'refresh' on your browser, and download
+again, to get the most recent version.
+
+
+
Edit the file "BNT/add_BNT_to_path.m" so it contains the correct
+pathname.
+For example, in Windows,
+I download FullBNT.zip into C:\kpmurphy\matlab, and
+then comment out the second line (with the % character), and uncomment
+the third line, which reads
+
+BNT_HOME = 'C:\kpmurphy\matlab\FullBNT';
+
+
+
+
Start up Matlab.
+
+
+
Type "ver" at the Matlab prompt (">>").
+You need Matlab version 5.2 or newer to run BNT.
+(Versions 5.0 and 5.1 have a memory leak which seems to sometimes
+crash BNT.)
+
+
+
Move to the BNT directory.
+For example, in Windows, I type
+
+>> cd C:\kpmurphy\matlab\FullBNT\BNT
+
+
+
+
Type "add_BNT_to_path".
+This executes the command
+addpath(genpath(BNT_HOME)),
+which adds all directories below FullBNT to the matlab path.
+
+
+
Type "test_BNT".
+If all goes well, this will just produce a bunch of numbers and pictures.
+It may produce some
+warning messages (which you can ignore), but should not produce any error messages.
+(The warnings should only be of the form
+"Warning: Maximum number of iterations has been exceeded", and are
+produced by Netlab.)
+
+
+
+
+If you are new to Matlab, you might like to check out
+some useful Matlab tips.
+For instance, this explains how to create a startup file, which can be
+used to set your path variable automatically, so you can avoid having
+to type the above commands every time.
+
+
+
+
+
+
+Some BNT functions also have C implementations.
+It is not necessary to install the C code, but it can result in a speedup
+of a factor of 2-5 of certain simple functions.
+To install all the C code,
+edit installC_BNT.m so it contains the right path,
+then type installC_BNT.
+To uninstall all the C code,
+edit uninstallC_BNT.m so it contains the right path,
+then type uninstallC_BNT.
+For an up-to-date list of the files which have C implementations, see
+BNT/installC_BNT.m.
+
+
+mex is a script that lets you call C code from Matlab - it does not compile matlab to
+C (see mcc below).
+If your C/C++ compiler is set up correctly, mex should work out of
+the box. (Matlab 6 now ships with its own minimal C compiler.)
+If not, you might need to type "mex -setup" at the Matlab prompt.
+
+In general, typing
+'mex foo.c' from inside Matlab creates a file called
+'foo.mexglx' or 'foo.dll' (the exact file
+extension is system dependent - on Linux it is 'mexglx', on Windows it is '.dll').
+The resulting file will hide the original 'foo.m' (if it existed), i.e.,
+typing 'foo' at the prompt will call the compiled C version.
+To reveal the original matlab version, just delete foo.mexglx (this is
+what uninstallC does).
+
+Sometimes it takes time for Matlab to realize that the file has
+changed from matlab to C or vice versa; try typing 'clear all' or
+restarting Matlab to refresh it.
+To find out which version of a file you are running, type
+'which foo'.
+
+mcc, the
+Matlab to C compiler, is a separate product,
+and is quite different from mex. It does not yet support
+objects/classes, which is why we can't compile all of BNT to C automatically.
+Also, hand-written C code is usually much
+better than the C code generated by mcc.
+
+
+
+Acknowledgements:
+Although I wrote some of the C code, most of
+the C code (e.g., for jtree and dpot) was written by Wei Hu;
+the triangulation C code was written by Ilya Shpitser.
+
+
+
+
+To define a Bayes net, you must specify the graph structure and then
+the parameters. We look at each in turn, using a simple example
+(adapted from Russell and
+Norvig, "Artificial Intelligence: a Modern Approach", Prentice Hall,
+1995, p454).
+
+
+
Graph structure
+
+
+Consider the following network.
+
+
+
+
+
+
+
+
+To specify this directed acyclic graph (dag), we create an adjacency matrix:
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+
+We have numbered the nodes as follows:
+Cloudy = 1, Sprinkler = 2, Rain = 3, WetGrass = 4.
+The nodes must always be numbered in topological order, i.e.,
+ancestors before descendants.
+For a more complicated graph, this is a little inconvenient: we will
+see how to get around this below.
+
+
+We list all the different types of CPDs supported by BNT.
+For each CPD, we specify if the child and parents can be discrete (D) or
+continuous (C) (Binary (B) nodes are a special case).
+We also specify which methods each class supports.
+If a method is inherited, the name of the parent class is mentioned.
+If a parent class calls a child method, this is mentioned.
+
+The CPD_to_CPT method converts a CPD to a table; this
+requires that the child and all parents are discrete.
+The CPT might be exponentially big...
+convert_to_table evaluates a CPD with evidence, and
+represents the resulting potential as an array.
+This requires that the child is discrete, and any continuous parents
+are observed.
+convert_to_pot evaluates a CPD with evidence, and
+represents the resulting potential as a dpot, gpot, cgpot or upot, as
+requested. (d=discrete, g=Gaussian, cg = conditional Gaussian, u =
+utility).
+
+
+When we sample a node, all the parents are observed.
+When we compute the (log) probability of a node, all the parents and
+the child are observed.
+
+We also specify if the parameters are learnable.
+For learning with EM, we require
+the methods reset_ess, update_ess and
+maximize_params.
+For learning from fully observed data, we require
+the method learn_params.
+By default, all classes inherit this from generic_CPD, which simply
+calls update_ess N times, once for each data case, followed
+by maximize_params, i.e., it is like EM, without the E step.
+Some classes implement a batch formula, which is quicker.
+
+Bayesian learning means computing a posterior over the parameters
+given fully observed data.
+
+Pearl means we implement the methods compute_pi and
+compute_lambda_msg, used by
+pearl_inf_engine, which runs on directed graphs.
+belprop_inf_engine only needs convert_to_pot.
+The pearl methods can exploit special properties of the CPDs for
+computing the messages efficiently, whereas belprop does not.
+
+The only method implemented by generic_CPD is adjustable_CPD,
+which is not shown, since it is not very interesting.
+
+
+
+
+In Figure (a) below, we show how Factor Analysis can be thought of as a
+graphical model. Here, X has an N(0,I) prior, and
+Y|X=x ~ N(mu + Wx, Psi),
+where Psi is diagonal and W is called the "factor loading matrix".
+Since the noise on both X and Y is diagonal, the components of these
+vectors are uncorrelated, and hence can be represented as individual
+scalar nodes, as we show in (b).
+(This is useful if parts of the observations on the Y vector are occasionally missing.)
+We usually take k=|X| << |Y|=D, so the model tries to explain
+many observations using a low-dimensional subspace.
+
+
+
+
+The root node is clamped to the N(0,I) distribution, so that we will
+not update these parameters during learning.
+The mean of the leaf node is clamped to 0,
+since we assume the data has been centered (had its mean subtracted
+off); this is just for simplicity.
+Finally, the covariance of the leaf node is constrained to be
+diagonal. W0 and Psi0 are the initial parameter guesses.
+
+
+We can fit this model (i.e., estimate its parameters in a maximum
+likelihood (ML) sense) using EM, as we
+explain below.
+Not surprisingly, the ML estimates for mu and Psi turn out to be
+identical to the
+sample mean and variance, which can be computed directly as
+
+mu_ML = mean(data);
+Psi_ML = diag(cov(data));
+
+Note that W can only be identified up to a rotation matrix, because of
+the spherical symmetry of the source.
+
+
+If we restrict Psi to be spherical, i.e., Psi = sigma*I,
+there is a closed-form solution for W as well,
+i.e., we do not need to use EM.
+In particular, W contains the first |X| eigenvectors of the sample covariance
+matrix, with scalings determined by the eigenvalues and sigma.
+Classical PCA can be obtained by taking the sigma->0 limit.
+For details, see
+
+
+By adding a hidden discrete variable, we can create mixtures of FA
+models, as shown in (c).
+Now we can explain the data using a set of subspaces.
+We can create this model in BNT as follows.
+
+Notice how the covariance matrix for Y is the same for all values of
+Q; that is, the noise level in each sub-space is assumed the same.
+However, we allow the offset, mu, to vary.
+For details, see
+
+I have included Zoubin's specialized MFA code (with his permission)
+with the toolbox, so you can check that BNT gives the same results:
+see 'BNT/examples/static/mfa1.m'.
+
+
+Independent Factor Analysis (IFA) generalizes FA by allowing a
+non-Gaussian prior on each component of X.
+(Note that we can approximate a non-Gaussian prior using a mixture of
+Gaussians.)
+This means that the likelihood function is no longer rotationally
+invariant, so we can uniquely identify W and the hidden
+sources X.
+IFA also allows a non-diagonal Psi (i.e. correlations between the components of Y).
+We recover classical Independent Components Analysis (ICA)
+in the Psi -> 0 limit, and by assuming that |X|=|Y|, so that the
+weight matrix W is square and invertible.
+For details, see
+
+
+As an example of the use of the softmax function,
+we introduce the Mixture of Experts model.
+
+As before,
+circles denote continuous-valued nodes,
+squares denote discrete nodes, clear
+means hidden, and shaded means observed.
+
+
+
+
+
+
+
+
+
+X is the observed
+input, Y is the output, and
+the Q nodes are hidden "gating" nodes, which select the appropriate
+set of parameters for Y. During training, Y is assumed observed,
+but for testing, the goal is to predict Y given X.
+Note that this is a conditional density model, so we don't
+associate any parameters with X.
+Hence X's CPD will be a root CPD, which is a way of modelling
+exogenous nodes.
+If the output is a continuous-valued quantity,
+we assume the "experts" are linear-regression units,
+and set Y's CPD to linear-Gaussian.
+If the output is discrete, we set Y's CPD to a softmax function.
+The Q CPDs will always be softmax functions.
+
+
+As a concrete example, consider the mixture of experts model where X and Y are
+scalars, and Q is binary.
+This is just piecewise linear regression, where
+we have two line segments, i.e.,
+
+
+
+We can create this model with random parameters as follows.
+(This code is bundled in BNT/examples/static/mixexp2.m.)
+
+This is what the model looks like before training.
+(Thanks to Thomas Hofman for writing this plotting routine.)
+
+
+
+
+
+Now let's train the model, and plot the final performance.
+(We will discuss how to train models in more detail below.)
+
+
+ncases = size(data, 1); % each row of data is a training case
+cases = cell(3, ncases);
+cases([1 3], :) = num2cell(data'); % each column of cases is a training case
+engine = jtree_inf_engine(bnet);
+max_iter = 20;
+[bnet2, LLtrace] = learn_params_em(engine, cases, max_iter);
+
+(We specify which nodes will be observed when we create the engine.
+Hence BNT knows that the hidden nodes are all discrete.
+For complex models, this can lead to a significant speedup.)
+Below we show what the model looks like after 16 iterations of EM
+(with 100 IRLS iterations per M step), when it converged
+using the default convergence tolerance (that the
+fractional change in the log-likelihood be less than 1e-3).
+Before learning, the log-likelihood was
+-322.927442; afterwards, it was -13.728778.
+
+
+
+
+(See BNT/examples/static/mixexp2.m for details of the code.)
+
+
+
+
+
+A hierarchical mixture of experts (HME) extends the mixture of experts
+model by having more than one hidden node. A two-level example is shown below, along
+with its more traditional representation as a neural network.
+This is like a (balanced) probabilistic decision tree of height 2.
+
+
+
+
+
+Pierpaolo Brutti
+has written an extensive set of routines for HMEs,
+which are bundled with BNT: see the examples/static/HME directory.
+These routines allow you to choose the number of hidden (gating)
+layers, and the form of the experts (softmax or MLP).
+See the file hmemenu, which provides a demo.
+For example, the figure below shows the decision boundaries learned
+for a ternary classification problem, using a 2 level HME with softmax
+gates and softmax experts; the training set is on the left, the
+testing set on the right.
+
"Generalized Linear Models", McCullagh and Nelder, Chapman and
+Hall, 1983.
+
+
+"Improved learning algorithms for mixtures of experts in multiclass
+classification".
+K. Chen, L. Xu, H. Chi.
+Neural Networks (1999) 12: 1229-1252.
+
+
+
+Bayes nets originally arose out of an attempt to add probabilities to
+expert systems, and this is still the most common use for BNs.
+A famous example is
+QMR-DT, a decision-theoretic reformulation of the Quick Medical
+Reference (QMR) model.
+
+
+
+
+Here, the top layer represents hidden disease nodes, and the bottom
+layer represents observed symptom nodes.
+The goal is to infer the posterior probability of each disease given
+all the symptoms (which can be present, absent or unknown).
+Each node in the top layer has a Bernoulli prior (with a low prior
+probability that the disease is present).
+Since each node in the bottom layer has a high fan-in, we use a
+noisy-OR parameterization; each disease has an independent chance of
+causing each symptom.
+The real QMR-DT model is copyright, but
+we can create a random QMR-like model as follows.
+
+function bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+% MK_QMR_BNET Make a QMR model
+% bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+%
+% G(i,j) = 1 iff there is an arc from disease i to finding j
+% inhibit(i,j) = inhibition probability on i->j arc
+% leak(j) = inhibition prob. on leak->j arc
+% prior(i) = prob. disease i is on
+
+[Ndiseases Nfindings] = size(inhibit);
+N = Ndiseases + Nfindings;
+finding_node = Ndiseases+1:N;
+ns = 2*ones(1,N);
+dag = zeros(N,N);
+dag(1:Ndiseases, finding_node) = G;
+bnet = mk_bnet(dag, ns, 'observed', finding_node);
+
+for d=1:Ndiseases
+ CPT = [1-prior(d) prior(d)];
+ bnet.CPD{d} = tabular_CPD(bnet, d, CPT');
+end
+
+for i=1:Nfindings
+ fnode = finding_node(i);
+ ps = parents(G, i);
+ bnet.CPD{fnode} = noisyor_CPD(bnet, fnode, leak(i), inhibit(ps, i));
+end
+
+In the file BNT/examples/static/qmr1, we create a random bipartite
+graph G, with 5 diseases and 10 findings, and random parameters.
+(In general, to create a random dag, use 'mk_random_dag'.)
+We can visualize the resulting graph structure using
+the methods discussed below, with the
+following results:
+
+
+
+
+Now let us put some random evidence on all the leaves except the very
+first and very last, and compute the disease posteriors.
+
+Junction tree can be quite slow on large QMR models.
+Fortunately, it is possible to exploit properties of the noisy-OR
+function to speed up exact inference using an algorithm called
+quickscore, discussed below.
+
+
+
+
+
+
+
+A conditional Gaussian model is one in which, conditioned on all the discrete
+nodes, the distribution over the remaining (continuous) nodes is
+multivariate Gaussian. This means we can have arcs from discrete (D)
+to continuous (C) nodes, but not vice versa.
+(We are allowed C->D arcs if the continuous nodes are observed,
+as in the mixture of experts model,
+since this distribution can be represented with a discrete potential.)
+
+We now give an example of a CG model, from
+the paper "Propagation of Probabilities, Means and
+Variances in Mixed Graphical Association Models", Steffen Lauritzen,
+JASA 87(420):1098--1108, 1992 (reprinted in the book "Probabilistic Networks and Expert
+Systems", R. G. Cowell, A. P. Dawid, S. L. Lauritzen and
+D. J. Spiegelhalter, Springer, 1999.)
+
+
Specifying the graph
+
+Consider the model of waste emissions from an incinerator plant shown below.
+We follow the standard convention that shaded nodes are observed,
+clear nodes are hidden.
+We also use the non-standard convention that
+square nodes are discrete (tabular) and round nodes are
+Gaussian.
+
+
+
+
+
+
+
+We can create this model as follows.
+
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+
+dag = zeros(n);
+dag(F,E)=1;
+dag(W,[E Min D]) = 1;
+dag(E,D)=1;
+dag(B,[C D])=1;
+dag(D,[L Mout])=1;
+dag(Min,Mout)=1;
+
+% node sizes - all cts nodes are scalar, all discrete nodes are binary
+ns = ones(1, n);
+dnodes = [F W B];
+cnodes = mysetdiff(1:n, dnodes);
+ns(dnodes) = 2;
+
+bnet = mk_bnet(dag, ns, 'discrete', dnodes);
+
+'dnodes' is a list of the discrete nodes; 'cnodes' is the continuous
+nodes. 'mysetdiff' is a faster version of the built-in 'setdiff'.
+
+
+
+
Specifying the parameters
+
+The parameters of the discrete nodes can be specified as follows.
+
+
+'marg' is a structure that contains the fields 'mu' and 'Sigma', which
+contain the mean and (co)variance of the marginal on E.
+In this case, they are both scalars.
+Let us check they match the published figures (to 2 decimal places).
+
+
+It is easy to visualize this posterior using standard Matlab plotting
+functions, e.g.,
+
+gaussplot2d(marg.mu, marg.Sigma);
+
+produces the following picture.
+
+
+
+
+
+
+
+
+The T field indicates that the mixing weight of this Gaussian
+component is 1.0.
+If the joint contains discrete and continuous variables, the result
+will be a mixture of Gaussians, e.g.,
+
+The interpretation is
+Sigma(i,j,k) = Cov[ E(i) E(j) | F=k ].
+In this case, E is a scalar, so i=j=1; k specifies the mixture component.
+
+We saw in the sprinkler network that BNT sets the effective size of
+observed discrete nodes to 1, since they only have one legal value.
+For continuous nodes, BNT sets their length to 0,
+since they have been reduced to a point.
+For example,
+
+It is simple to post-process the output of marginal_nodes.
+For example, the file BNT/examples/static/cg1 sets the mu term of
+observed nodes to their observed value, and the Sigma term to 0 (since
+observed nodes have no variance).
+
+
+Note that the implemented version of the junction tree is numerically
+unstable when using CG potentials
+(which is why, in the example above, we only required our answers to agree with
+the published ones to 2dp.)
+This is why you might want to use stab_cond_gauss_inf_engine,
+implemented by Shan Huang. This is described in
+
+
+
"Stable Local Computation with Conditional Gaussian Distributions",
+S. Lauritzen and F. Jensen, Tech Report R-99-2014,
+Dept. Math. Sciences, Aalborg Univ., 1999.
+
+
+However, even the numerically stable version
+can be computationally intractable if there are many hidden discrete
+nodes, because the number of mixture components grows exponentially e.g., in a
+switching linear dynamical system.
+In general, one must resort to approximate inference techniques: see
+the discussion on inference engines below.
+
+
+
+
+The parameter estimation routines in BNT can be classified into 4
+types, depending on whether the goal is to compute
+a full (Bayesian) posterior over the parameters or just a point
+estimate (e.g., Maximum Likelihood or Maximum A Posteriori),
+and whether all the variables are fully observed or there is missing
+data/ hidden variables (partial observability).
+
+
+To load numeric data from an ASCII text file called 'dat.txt', where each row is a
+case and columns are separated by white-space, such as
+
+011979 1626.5 0.0
+021979 1367.0 0.0
+...
+
+you can use
+
+data = load('dat.txt');
+
+or
+
+load dat.txt -ascii
+
+In the latter case, the data is stored in a variable called 'dat' (the
+filename minus the extension).
+Alternatively, suppose the data is stored in a .csv file (has commas
+separating the columns, and contains a header line), such as
+
+header info goes here
+ORD,011979,1626.5,0.0
+DSM,021979,1367.0,0.0
+...
+
+If your file is not in either of these formats, you can either use Perl to convert
+it to this format, or use the Matlab scanf command.
+Type
+
+help iofun
+
+for more information on Matlab's file functions.
+
+
+BNT learning routines require data to be stored in a cell array.
+data{i,m} is the value of node i in case (example) m, i.e., each
+column is a case.
+If node i is not observed in case m (missing value), set
+data{i,m} = [].
+(Not all the learning routines can cope with such missing values, however.)
+In the special case that all the nodes are observed and are
+scalar-valued (as opposed to vector-valued), the data can be
+stored in a matrix (as opposed to a cell-array).
+
+Suppose, as in the mixture of experts example,
+that we have 3 nodes in the graph: X(1) is the observed input, X(3) is
+the observed output, and X(2) is a hidden (gating) node. We can
+create the dataset as follows.
+
+
+As an example, let's generate some data from the sprinkler network, randomize the parameters,
+and then try to recover the original model.
+First we create some training data using forwards sampling.
+
+samples{j,i} contains the value of the j'th node in case i.
+sample_bnet returns a cell array because, in general, each node might
+be a vector of different length.
+In this case, all nodes are discrete (and hence scalars), so we
+could have used a regular array instead (which can be quicker):
+
+data = cell2num(samples);
+
+Now we create a network with random parameters.
+(The initial values of bnet2 don't matter in this case, since we can find the
+globally optimal MLE independent of where we start.)
+
+% Make a tabula rasa
+bnet2 = mk_bnet(dag, node_sizes);
+seed = 0;
+rand('state', seed);
+bnet2.CPD{C} = tabular_CPD(bnet2, C);
+bnet2.CPD{R} = tabular_CPD(bnet2, R);
+bnet2.CPD{S} = tabular_CPD(bnet2, S);
+bnet2.CPD{W} = tabular_CPD(bnet2, W);
+
+Finally, we find the maximum likelihood estimates of the parameters.
+
+bnet3 = learn_params(bnet2, samples);
+
+To view the learned parameters, we use a little Matlab hackery.
+
+
+Currently, only tabular CPDs can have priors on their parameters.
+The conjugate prior for a multinomial is the Dirichlet.
+(For binary random variables, the multinomial is the same as the
+Bernoulli, and the Dirichlet is the same as the Beta.)
+
+The Dirichlet has a simple interpretation in terms of pseudo counts.
+If we let N_ijk = the num. times X_i=k and Pa_i=j occurs in the
+training set, where Pa_i are the parents of X_i,
+then the maximum likelihood (ML) estimate is
+T_ijk = N_ijk / N_ij (where N_ij = sum_k' N_ijk'), which will be 0 if N_ijk=0.
+To prevent us from declaring that (X_i=k, Pa_i=j) is impossible just because this
+event was not seen in the training set,
+we can pretend we saw value k of X_i, for each value j of Pa_i some number (alpha_ijk)
+of times in the past.
+The MAP (maximum a posterior) estimate is then
+
+and is never 0 if all alpha_ijk > 0.
+For example, consider the network A->B, where A is binary and B has 3
+values.
+A uniform prior for B has the form
+
+ B=1 B=2 B=3
+A=1 1 1 1
+A=2 1 1 1
+
+which can be created using
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'unif');
+
+This prior does not satisfy the likelihood equivalence principle,
+which says that Markov equivalent models
+should have the same marginal likelihood.
+A prior that does satisfy this principle is shown below.
+Heckerman (1995) calls this the
+BDeu prior (likelihood equivalent uniform Bayesian Dirichlet).
+
+ B=1 B=2 B=3
+A=1 1/6 1/6 1/6
+A=2 1/6 1/6 1/6
+
+where we put N/(q*r) in each bin; N is the equivalent sample size,
+r=|A|, q = |B|.
+This can be created as follows
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'BDeu');
+
+Here, 1 is the equivalent sample size, and is the strength of the
+prior.
+You can change this using
+
+bnet.CPD{i}.prior contains the new Dirichlet pseudocounts,
+and bnet.CPD{i}.CPT is set to the mean of the posterior (the
+normalized counts).
+(Hence if the initial pseudo counts are 0,
+bayes_update_params and learn_params will give the
+same result.)
+
+
+
+
+
+We can compute the same result sequentially (on-line) as follows.
+
+
+The file BNT/examples/static/StructLearn/model_select1 has an example of
+sequential model selection which uses the same idea.
+We generate data from the model A->B
+and compute the posterior prob of all 3 dags on 2 nodes:
+ (1) A B, (2) A <- B , (3) A -> B
+Models 2 and 3 are Markov equivalent, and therefore indistinguishable from
+observational data alone, so we expect their posteriors to be the same
+(assuming a prior which satisfies likelihood equivalence).
+If we use random parameters, the "true" model only gets a higher posterior after 2000 trials!
+However, if we make B a noisy NOT gate, the true model "wins" after 12
+trials, as shown below (red = model 1, blue/green (superimposed)
+represents models 2/3).
+
+
+
+The use of marginal likelihood for model selection is discussed in
+greater detail in the
+section on structure learning.
+
+
+
+
+
+samples2{i,l} is the value of node i in training case l, or [] if unobserved.
+
+Now we will compute the MLEs using the EM algorithm.
+We need to use an inference algorithm to compute the expected
+sufficient statistics in the E step; the M (maximization) step is as
+above.
+
+
+When fitting a Gaussian CPD using EM, you
+might encounter some numerical
+problems. See here
+for a discussion of this in the context of HMMs.
+
+
+
+
+
+In networks with repeated structure (e.g., chains and grids), it is
+common to assume that the parameters are the same at every node. This
+is called parameter tying, and reduces the amount of data needed for
+learning.
+
+When we have tied parameters, there is no longer a one-to-one
+correspondence between nodes and CPDs.
+Rather, each CPD specifies the parameters for a whole equivalence class
+of nodes.
+It is easiest to see this by example.
+Consider the following hidden Markov
+model (HMM)
+
+
+
+
+When HMMs are used for semi-infinite processes like speech recognition,
+we assume the transition matrix
+P(H(t+1)|H(t)) is the same for all t; this is called a time-invariant
+or homogeneous Markov chain.
+Hence hidden nodes 2, 3, ..., T
+are all in the same equivalence class, say class Hclass.
+Similarly, the observation matrix P(O(t)|H(t)) is assumed to be the
+same for all t, so the observed nodes are all in the same equivalence
+class, say class Oclass.
+Finally, the prior term P(H(1)) is in a class all by itself, say class
+H1class.
+This is illustrated below, where we explicitly represent the
+parameters as random variables (dotted nodes).
+
+
+
+In BNT, we cannot represent parameters as random variables (nodes).
+Instead, we "hide" the
+parameters inside one CPD for each equivalence class,
+and then specify that the other CPDs should share these parameters, as
+follows.
+
+hnodes = 1:2:2*T;
+onodes = 2:2:2*T;
+H1class = 1; Hclass = 2; Oclass = 3;
+eclass = ones(1,N);
+eclass(hnodes(2:end)) = Hclass;
+eclass(hnodes(1)) = H1class;
+eclass(onodes) = Oclass;
+% create dag and ns in the usual way
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'equiv_class', eclass);
+
+Finally, we define the parameters for each equivalence class:
+
+In general, if bnet.CPD{e} = xxx_CPD(bnet, j), then j should be a
+member of e's equivalence class; that is, it is not always the case
+that e == j. You can use bnet.rep_of_eclass(e) to return the
+representative of equivalence class e.
+BNT will look up the parents of j to determine the size
+of the CPT to use. It assumes that this is the same for all members of
+the equivalence class.
+Click here for
+a more complex example of parameter tying.
+
+Note:
+Normally one would define an HMM as a
+Dynamic Bayes Net
+(see the function BNT/examples/dynamic/mk_chmm.m).
+However, one can define an HMM as a static BN using the function
+BNT/examples/static/Models/mk_hmm_bnet.m.
+
+
+
+
+
+Update (9/29/03):
+Philippe Leray is developing some additional structure learning code
+on top of BNT. Click here
+for details.
+
+
+
+There are two very different approaches to structure learning:
+constraint-based and search-and-score.
+In the constraint-based approach,
+we start with a fully connected graph, and remove edges if certain
+conditional independencies are measured in the data.
+This has the disadvantage that repeated independence tests lose
+statistical power.
+
+In the more popular search-and-score approach,
+we perform a search through the space of possible DAGs, and either
+return the best one found (a point estimate), or return a sample of the
+models found (an approximation to the Bayesian posterior).
+
+Unfortunately, the number of DAGs as a function of the number of
+nodes, G(n), is super-exponential in n.
+A closed form formula for G(n) is not known, but the first few values
+are shown below (from Cooper, 1999).
+
+
+
n
G(n)
+
1
1
+
2
3
+
3
25
+
4
543
+
5
29,281
+
6
3,781,503
+
7
1.1 x 10^9
+
8
7.8 x 10^11
+
9
1.2 x 10^15
+
10
4.2 x 10^18
+
+
+Since the number of DAGs is super-exponential in the number of nodes,
+we cannot exhaustively search the space, so we either use a local
+search algorithm (e.g., greedy hill climbing, perhaps with multiple
+restarts) or a global search algorithm (e.g., Markov Chain Monte
+Carlo).
+
+If we know a total ordering on the nodes,
+finding the best structure amounts to picking the best set of parents
+for each node independently.
+This is what the K2 algorithm does.
+If the ordering is unknown, we can search over orderings,
+which is more efficient than searching over DAGs (Koller and Friedman, 2000).
+
+In addition to the search procedure, we must specify the scoring
+function. There are two popular choices. The Bayesian score integrates
+out the parameters, i.e., it is the marginal likelihood of the model.
+The BIC (Bayesian Information Criterion) is defined as
+log P(D|theta_hat) - 0.5*d*log(N), where D is the data, theta_hat is
+the ML estimate of the parameters, d is the number of parameters, and
+N is the number of data cases.
+The BIC method has the advantage of not requiring a prior.
+
+BIC can be derived as a large sample
+approximation to the marginal likelihood.
+(It is also equal to the Minimum Description Length of a model.)
+However, in practice, the sample size does not need to be very large
+for the approximation to be good.
+For example, in the figure below, we plot the ratio between the log marginal likelihood
+and the BIC score against data-set size; we see that the ratio rapidly
+approaches 1, especially for non-informative priors.
+(This plot was generated by the file BNT/examples/static/bic1.m. It
+uses the water sprinkler BN with BDeu Dirichlet priors with different
+equivalent sample sizes.)
+
+
+
+
+
+
+
+
+As with parameter learning, handling missing data/ hidden variables is
+much harder than the fully observed case.
+The structure learning routines in BNT can therefore be classified into 4
+types, analogously to the parameter learning case.
+
+
+If two DAGs encode the same conditional independencies, they are
+called Markov equivalent. The set of all DAGs can be paritioned into
+Markov equivalence classes. Graphs within the same class can
+have
+the direction of some of their arcs reversed without changing any of
+the CI relationships.
+Each class can be represented by a PDAG
+(partially directed acyclic graph) called an essential graph or
+pattern. This specifies which edges must be oriented in a certain
+direction, and which may be reversed.
+
+
+When learning graph structure from observational data,
+the best one can hope to do is to identify the model up to Markov
+equivalence. To distinguish amongst graphs within the same equivalence
+class, one needs interventional data: see the discussion on active learning below.
+
+
+
+
+
+The brute-force approach to structure learning is to enumerate all
+possible DAGs, and score each one. This provides a "gold standard"
+with which to compare other algorithms. We can do this as follows.
+
+where data(i,m) is the value of node i in case m,
+and ns(i) is the size of node i.
+If the DAGs have a lot of families in common, we can cache the sufficient statistics,
+making this potentially more efficient than scoring the DAGs one at a time.
+(Caching is not currently implemented, however.)
+
+By default, we use the Bayesian scoring metric, and assume CPDs are
+represented by tables with BDeu(1) priors.
+We can override these defaults as follows.
+If we want to use uniform priors, we can say
+
+params{i} is a cell-array, containing optional arguments that are
+passed to the constructor for CPD i.
+
+Now suppose we want to use different node types, e.g.,
+Suppose nodes 1 and 2 are Gaussian, and nodes 3 and 4 softmax (both
+these CPDs can support discrete and continuous parents, which is
+necessary since all other nodes will be considered as parents).
+The Bayesian scoring metric currently only works for tabular CPDs, so
+we will use BIC:
+
+In practice, one can't enumerate all possible DAGs for N > 5,
+but one can evaluate any reasonably-sized set of hypotheses in this
+way (e.g., nearest neighbors of your current best guess).
+Think of this as "computer assisted model refinement" as opposed to de
+novo learning.
+
+
+
+
+The K2 algorithm (Cooper and Herskovits, 1992) is a greedy search algorithm that works as follows.
+Initially each node has no parents. It then adds incrementally that parent whose addition most
+increases the score of the resulting structure. When the addition of no single
+parent can increase the score, it stops adding parents to the node.
+Since we are using a fixed ordering, we do not need to check for
+cycles, and can choose the parents for each node independently.
+
+The original paper used the Bayesian scoring
+metric with tabular CPDs and Dirichlet priors.
+BNT generalizes this to allow any kind of CPD, and either the Bayesian
+scoring metric or BIC, as in the example above.
+In addition, you can specify
+an optional upper bound on the number of parents for each node.
+The file BNT/examples/static/k2demo1.m gives an example of how to use K2.
+We use the water sprinkler network and sample 100 cases from it as before.
+Then we see how much data it takes to recover the generating structure:
+
+So we see it takes about sz(10)=50 cases. (BIC behaves similarly,
+showing that the prior doesn't matter too much.)
+In general, we cannot hope to recover the "true" generating structure,
+only one that is in its Markov equivalence
+class.
+
+
+
+
+Hill-climbing starts at a specific point in space,
+considers all nearest neighbors, and moves to the neighbor
+that has the highest score; if no neighbors have higher
+score than the current point (i.e., we have reached a local maximum),
+the algorithm stops. One can then restart in another part of the space.
+
+A common definition of "neighbor" is all graphs that can be
+generated from the current graph by adding, deleting or reversing a
+single arc, subject to the acyclicity constraint.
+Other neighborhoods are possible: see
+
+Optimal Structure Identification with Greedy Search, Max
+Chickering, JMLR 2002.
+
+
+
+
+
+
+We can use a Markov Chain Monte Carlo (MCMC) algorithm called
+Metropolis-Hastings (MH) to search the space of all
+DAGs.
+The standard proposal distribution is to consider moving to all
+nearest neighbors in the sense defined above.
+
+The function can be called
+as in the following example.
+
+We can also plot the acceptance ratio versus number of MCMC steps,
+as a crude convergence diagnostic.
+
+clf
+plot(accept_ratio)
+
+
+
+Better convergence diagnostics can be found
+here
+
+Even though the number of samples needed by MCMC is theoretically
+polynomial (not exponential) in the dimensionality of the search space, in practice it has been
+found that MCMC does not converge in reasonable time for graphs with
+more than about 10 nodes.
+
+
+As was mentioned above,
+one can only learn a DAG up to Markov equivalence, even given infinite data.
+If one is interested in learning the structure of a causal network,
+one needs interventional data.
+(By "intervention" we mean forcing a node to take on a specific value,
+thereby effectively severing its incoming arcs.)
+
+Most of the scoring functions accept an optional argument
+that specifies whether a node was observed to have a certain value, or
+was forced to have that value: we set clamped(i,m)=1 if node i was
+forced in training case m. e.g., see the file
+BNT/examples/static/cooper_yoo.
+
+An interesting question is to decide which interventions to perform
+(c.f., design of experiments). For details, see the following tech
+report
+
+
+Computing the Bayesian score when there is partial observability is
+computationally challenging, because the parameter posterior becomes
+multimodal (the hidden nodes induce a mixture distribution).
+One therefore needs to use approximations such as BIC.
+Unfortunately, search algorithms are still expensive, because we need
+to run EM at each step to compute the MLE, which is needed to compute
+the score of each model. An alternative approach is
+to do the local search steps inside of the M step of EM, which is more
+efficient since the data has been "filled in" - this is
+called the structural EM algorithm (Friedman 1997), and provably
+converges to a local maximum of the BIC score.
+
+Wei Hu has implemented SEM for discrete nodes.
+You can download his package from
+here.
+Please address all questions about this code to
+wei.hu@intel.com.
+See also Phl's implementation of SEM.
+
+
+
+
+
+
+You can visualize an arbitrary graph (such as one learned using the
+structure learning routines) with Matlab code contributed by
+Ali
+Taylan Cemgil
+from the University of Nijmegen.
+For static BNs, call it as follows:
+
+If you install the excellent graphviz, an
+open-source graph visualization package from AT&T,
+you can create a much better visualization as follows
+
+graph_to_dot(bnet.dag)
+
+This works by converting the adjacency matrix to a file suitable
+for input to graphviz (using the dot format),
+then converting the output of graphviz to postscript, and displaying the results using
+ghostview.
+You can do each of these steps separately for more control, as shown
+below.
+
+
+The IC algorithm (Pearl and Verma, 1991),
+and the faster, but otherwise equivalent, PC algorithm (Spirtes, Glymour, and Scheines 1993),
+computes many conditional independence tests,
+and combines these constraints into a
+PDAG to represent the whole
+Markov equivalence class.
+
+IC*/FCI extend IC/PC to handle latent variables: see below.
+(IC stands for inductive causation; PC stands for Peter and Clark,
+the first names of Spirtes and Glymour; FCI stands for fast causal
+inference.
+What we, following Pearl (2000), call IC* was called
+IC in the original Pearl and Verma paper.)
+For details, see
+
+
+The PC algorithm takes as arguments a function f, the number of nodes N,
+the maximum fan in K, and additional arguments A which are passed to f.
+The function f(X,Y,S,A) returns 1 if X is conditionally independent of Y given S, and 0
+otherwise.
+For example, suppose we cheat by
+passing in a CI "oracle" which has access to the true DAG; the oracle
+tests for d-separation in this DAG, i.e.,
+f(X,Y,S) calls dsep(X,Y,S,dag). We can do this as follows.
+
+pdag = learn_struct_pdag_pc('dsep', N, max_fan_in, dag);
+
+pdag(i,j) = -1 if there is definitely an i->j arc,
+and pdag(i,j) = 1 if there is either an i->j or an i<-j arc.
+
+So as expected, we see that the V-structure at the W node is uniquely identified,
+but the other arcs have ambiguous orientation.
+
+We now give an example from p141 (1st edn) / p103 (2nd edn) of the SGS
+book.
+This example concerns the female orgasm.
+We are given a correlation matrix C between 7 measured factors (such
+as subjective experiences of coital and masturbatory experiences),
+derived from 281 samples, and want to learn a causal model of the
+data. We will not discuss the merits of this type of work here, but
+merely show how to reproduce the results in the SGS book.
+Their program,
+Tetrad,
+makes use of the Fisher Z-test for conditional
+independence, so we do the same:
+
+The results match those of Fig 12a of SGS apart from two edge
+differences; presumably this is due to rounding error (although it
+could be a bug, either in BNT or in Tetrad).
+This example can be found in the file BNT/examples/static/pc2.m.
+
+
+
+The IC* algorithm (Pearl and Verma, 1991),
+and the faster FCI algorithm (Spirtes, Glymour, and Scheines 1993),
+are like the IC/PC algorithm, except that they can detect the presence
+of latent variables.
+See the file learn_struct_pdag_ic_star written by Tamar
+Kushnir. The output is a matrix P, defined as follows
+(see Pearl (2000), p52 for details):
+
+% P(i,j) = -1 if there is either a latent variable L such that i <-L->j OR there is a directed edge from i->j.
+% P(i,j) = -2 if there is a marked directed i-*>j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i--j
+% P(i,j) = P(j,i) = 2 if there is a latent variable L such that i<-L->j.
+
+
+Up until now, we have used the junction tree algorithm for inference.
+However, sometimes this is too slow, or not even applicable.
+In general, there are many inference algorithms each of which make
+different tradeoffs between speed, accuracy, complexity and
+generality. Furthermore, there might be many implementations of the
+same algorithm; for instance, a general purpose, readable version,
+and a highly-optimized, specialized one.
+To cope with this variety, we treat each inference algorithm as an
+object, which we call an inference engine.
+
+
+An inference engine is an object that contains a bnet and supports the
+'enter_evidence' and 'marginal_nodes' methods. The engine constructor
+takes the bnet as argument and may do some model-specific processing.
+When 'enter_evidence' is called, the engine may do some
+evidence-specific processing. Finally, when 'marginal_nodes' is
+called, the engine may do some query-specific processing.
+
+
+The amount of work done when each stage is specified -- structure,
+parameters, evidence, and query -- depends on the engine. The cost of
+work done early in this sequence can be amortized. On the other hand,
+one can make better optimizations if one waits until later in the
+sequence.
+For example, the parameters might imply
+conditional independencies that are not evident in the graph structure,
+but can nevertheless be exploited; the evidence indicates which nodes
+are observed and hence can effectively be disconnected from the
+graph; and the query might indicate that large parts of the network
+are d-separated from the query nodes. (Since it is not the actual
+values of the evidence that matters, just which nodes are observed,
+many engines allow you to specify which nodes will be observed when they are constructed,
+i.e., before calling 'enter_evidence'. Some engines can still cope if
+the actual pattern of evidence is different, e.g., if there is missing
+data.)
+
+
+Although being maximally lazy (i.e., only doing work when a query is
+issued) may seem desirable,
+this is not always the most efficient.
+For example,
+when learning using EM, we need to call marginal_nodes N times, where N is the
+number of nodes. Variable elimination would end
+up repeating a lot of work
+each time marginal_nodes is called, making it inefficient for
+learning. The junction tree algorithm, by contrast, uses dynamic
+programming to avoid this redundant computation --- it calculates all
+marginals in two passes during 'enter_evidence', so calling
+'marginal_nodes' takes constant time.
+
+We will discuss some of the inference algorithms implemented in BNT
+below, and finish with a summary of all
+of them.
+
+
+
+
+
+
+
+
+
+The variable elimination algorithm, also known as bucket elimination
+or peeling, is one of the simplest inference algorithms.
+The basic idea is to "push sums inside of products"; this is explained
+in more detail
+here.
+
+The principle of distributing sums over products can be generalized
+greatly to apply to any commutative semiring.
+This forms the basis of many common algorithms, such as Viterbi
+decoding and the Fast Fourier Transform. For details, see
+
+
+
R. McEliece and S. M. Aji, 2000.
+
+
+The Generalized Distributive Law,
+IEEE Trans. Inform. Theory, vol. 46, no. 2 (March 2000),
+pp. 325--343.
+
+
+
+Choosing an order in which to sum out the variables so as to minimize
+computational cost is known to be NP-hard.
+The implementation of this algorithm in
+var_elim_inf_engine makes no attempt to optimize this
+ordering (in contrast, say, to jtree_inf_engine, which uses a
+greedy search procedure to find a good ordering).
+
+Note: unlike most algorithms, var_elim does all its computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+
+
+The simplest inference algorithm of all is to explicitly construct
+the joint distribution over all the nodes, and then to marginalize it.
+This is implemented in global_joint_inf_engine.
+Since the size of the joint is exponential in the
+number of discrete (hidden) nodes, this is not a very practical algorithm.
+It is included merely for pedagogical and debugging purposes.
+
+Three specialized versions of this algorithm have also been implemented,
+corresponding to the cases where all the nodes are discrete (D), all
+are Gaussian (G), and some are discrete and some Gaussian (CG).
+They are called enumerative_inf_engine,
+gaussian_inf_engine,
+and cond_gauss_inf_engine respectively.
+
+Note: unlike most algorithms, these global inference algorithms do all their computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+The junction tree algorithm is quite slow on the QMR network,
+since the cliques are so big.
+One simple trick we can use is to notice that hidden leaves do not
+affect the posteriors on the roots, and hence do not need to be
+included in the network.
+A second trick is to notice that the negative findings can be
+"absorbed" into the prior:
+see the file
+BNT/examples/static/mk_minimal_qmr_bnet for details.
+
+
+A much more significant speedup is obtained by exploiting special
+properties of the noisy-or node, as done by the quickscore
+algorithm. For details, see
+
+
Heckerman, "A tractable inference algorithm for diagnosing multiple diseases", UAI 89.
+
Rish and Dechter, "On the impact of causal independence", UCI
+tech report, 1998.
+
+
+This has been implemented in BNT as a special-purpose inference
+engine, which can be created and used as follows:
+
+
+Even using quickscore, exact inference takes time that is exponential
+in the number of positive findings.
+Hence for large networks we need to resort to approximate inference techniques.
+See for example
+
+
T. Jaakkola and M. Jordan, "Variational probabilistic inference and the
+QMR-DT network", JAIR 10, 1999.
+
+
K. Murphy, Y. Weiss and M. Jordan, "Loopy belief propagation for approximate inference: an empirical study",
+ UAI 99.
+
+The latter approximation
+entails applying Pearl's belief propagation algorithm to a model even
+if it has loops (hence the name loopy belief propagation).
+Pearl's algorithm, implemented as pearl_inf_engine, gives
+exact results when applied to singly-connected graphs
+(a.k.a. polytrees, since
+the underlying undirected topology is a tree, but a node may have
+multiple parents).
+To apply this algorithm to a graph with loops,
+use pearl_inf_engine.
+This can use a centralized or distributed message passing protocol.
+You can use it as in the following example.
+
+We found that this algorithm often converges, and when it does, often
+is very accurate, but it depends on the precise setting of the
+parameter values of the network.
+(See the file BNT/examples/static/qmr1 to repeat the experiment for yourself.)
+Understanding when and why belief propagation converges/ works
+is a topic of ongoing research.
+
+pearl_inf_engine can exploit special structure in noisy-or
+and gmux nodes to compute messages efficiently.
+
+belprop_inf_engine is like pearl, but uses potentials to
+represent messages. Hence this is slower.
+
+belprop_fg_inf_engine is like belprop,
+but is designed for factor graphs.
+
+
+
+
+
+BNT now (Mar '02) has two sampling (Monte Carlo) inference algorithms:
+
+
likelihood_weighting_inf_engine which does importance
+sampling and can handle any node type.
+
gibbs_sampling_inf_engine, written by Bhaskara Marthi.
+Currently this can only handle tabular CPDs.
+For a much faster and more powerful Gibbs sampling program, see
+BUGS.
+
+Note: To generate samples from a network (which is not the same as inference!),
+use sample_bnet.
+
+
+
+
+
+
+The inference engines differ in many ways. Here are
+some of the major "axes":
+
+
Works for all topologies or makes restrictions?
+
Works for all node types or makes restrictions?
+
Exact or approximate inference?
+
+
+
+In terms of topology, most engines handle any kind of DAG.
+belprop_fg does approximate inference on factor graphs (FG), which
+can be used to represent directed, undirected, and mixed (chain)
+graphs.
+(In the future, we plan to support exact inference on chain graphs.)
+quickscore only works on QMR-like models.
+
+In terms of node types: algorithms that use potentials can handle
+discrete (D), Gaussian (G) or conditional Gaussian (CG) models.
+Sampling algorithms can essentially handle any kind of node (distribution).
+Other algorithms make more restrictive assumptions in exchange for
+speed.
+
+Finally, most algorithms are designed to give the exact answer.
+The belief propagation algorithms are exact if applied to trees, and
+in some other cases.
+Sampling is considered approximate, even though, in the limit of an
+infinite number of samples, it gives the exact answer.
+
+
+
+Here is a summary of the properties
+of all the engines in BNT which work on static networks.
+
+LIMIDs explicitly show all information arcs, rather than implicitly
+assuming no forgetting. This allows them to model forgetful
+controllers.
+
+See the examples in BNT/examples/limids for details.
+
+
+
+
+
DBNs, HMMs, Kalman filters and all that
+
+Click here for documentation about how to
+use BNT for dynamical systems and sequence data.
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/usage_dbn.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/usage_dbn.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,719 @@
+
+How to use BNT for DBNs
+
+
+
+
+
+Documentation last updated on 7 June 2004
+
+
+
+Note:
+you are recommended to read an introduction
+to DBNs first, such as
+
+this book chapter.
+
+You may also want to consider using
+GMTk, which is
+an excellent C++ package for DBNs.
+
+
+
+
+
+
+
+Dynamic Bayesian Networks (DBNs) are directed graphical models of stochastic
+processes.
+They generalise hidden Markov models (HMMs)
+and linear dynamical systems (LDSs)
+by representing the hidden (and observed) state in terms of state
+variables, which can have complex interdependencies.
+The graphical structure provides an easy way to specify these
+conditional independencies, and hence to provide a compact
+parameterization of the model.
+
+Note that "temporal Bayesian network" would be a better name than
+"dynamic Bayesian network", since
+it is assumed that the model structure does not change, but
+the term DBN has become entrenched.
+We also normally assume that the parameters do not
+change, i.e., the model is time-invariant.
+However, we can always add extra
+hidden nodes to represent the current "regime", thereby creating
+mixtures of models to capture periodic non-stationarities.
+
+There are some cases where the size of the state space can change over
+time, e.g., tracking a variable, but unknown, number of objects.
+In this case, we need to change the model structure over time.
+BNT does not support this.
+
+
+
+
+
+The simplest kind of DBN is a Hidden Markov Model (HMM), which has
+one discrete hidden node and one discrete or continuous
+observed node per slice. We illustrate this below.
+As before, circles denote continuous nodes, squares denote
+discrete nodes, clear means hidden, shaded means observed.
+
+
+
+
+We have "unrolled" the model for three "time slices" -- the structure and parameters are
+assumed to repeat as the model is unrolled further.
+Hence to specify a DBN, we need to
+define the intra-slice topology (within a slice),
+the inter-slice topology (between two slices),
+as well as the parameters for the first two slices.
+(Such a two-slice temporal Bayes net is often called a 2TBN.)
+
+We can specify the topology as follows.
+
+intra = zeros(2);
+intra(1,2) = 1; % node 1 in slice t connects to node 2 in slice t
+
+inter = zeros(2);
+inter(1,1) = 1; % node 1 in slice t-1 connects to node 1 in slice t
+
+We can specify the parameters as follows,
+where for simplicity we assume the observed node is discrete.
+
+We assume the distributions P(X(t) | X(t-1)) and
+P(Y(t) | X(t)) are independent of t for t > 1.
+Hence the CPD for nodes 5, 7, ... is the same as for node 3, so we say they
+are in the same equivalence class, with node 3 being the "representative"
+for this class. In other words, we have tied the parameters for nodes
+3, 5, 7, ...
+Similarly, nodes 4, 6, 8, ... are tied.
+Note, however, that (the parameters for) nodes 1 and 2 are not tied to
+subsequent slices.
+
+Above we assumed the observation model P(Y(t) | X(t)) is independent of t for t>1, but
+it is conventional to assume this is true for all t.
+So we would like to put nodes 2, 4, 6, ... all in the same class.
+We can do this by explicitly defining the equivalence classes, as
+follows (see here for more details on
+parameter tying).
+
+We define eclass1(i) to be the equivalence class that node i in slice
+1 belongs to.
+Similarly, we define eclass2(i) to be the equivalence class that node i in slice
+2, 3, ..., belongs to.
+For an HMM, we have
+
+This ties the observation model across slices,
+since e.g., eclass(4) = eclass(2) = 2.
+
+By default,
+eclass1 = 1:ss, and eclass2 = (1:ss)+ss, where ss = slice size = the
+number of nodes per slice.
+
+But by using the above tieing pattern,
+we now only have 3 CPDs to specify, instead of 4:
+
+
+Consider the following model
+of a water purification plant, developed
+by Finn V. Jensen, Uffe Kjærulff, Kristian G. Olesen, and Jan
+Pedersen.
+
+
+
+
+
+As an example of a more complicated DBN, consider the following
+example,
+which is a model of a car's high level state, as might be used by
+an automated car.
+(The model is from Forbes, Huang, Kanazawa and Russell, "The BATmobile: Towards a
+Bayesian Automated Taxi", IJCAI 95. The figure is from
+Boyen and Koller, "Tractable Inference for Complex Stochastic
+Processes", UAI98.
+For simplicity, we only show the observed nodes for slice 2.)
+
+
+
+
+
+Since this topology is so complicated,
+it is useful to be able to refer to the nodes by name, instead of
+number.
+
+Finally, we can convert this cell array to an adjacency matrix using
+the following function:
+
+[intra, names] = mk_adj_mat(intrac, names, 1);
+
+This function also permutes the names so that they are in topological
+order.
+Given this ordering of the names, we can make the inter-slice
+connectivity matrix as follows:
+
+To specify the parameters, we must know the order of the parents.
+See the function BNT/general/mk_named_CPT for a way to do this in the
+case of tabular nodes. For simplicity, we just generate random
+parameters:
+
+
+
+The general inference problem for DBNs is to compute
+P(X(i,t0) | Y(:, t1:t2)), where X(i,t) represents the i'th hidden
+variable at time t and Y(:,t1:t2) represents all the evidence
+between times t1 and t2.
+There are several special cases of interest, illustrated below.
+The arrow indicates t0: it is X(t0) that we are trying to estimate.
+The shaded region denotes t1:t2, the available data.
+
+
+
+
+
+BNT can currently only handle offline smoothing.
+(The HMM engine handles filtering and, to a limited extent, prediction.)
+The usage is similar to static
+inference engines, except now the evidence is a 2D cell array of
+size ss*T, where ss is the number of nodes per slice (ss = slice size) and T is the
+number of slices.
+Also, 'marginal_nodes' takes two arguments, the nodes and the time-slice.
+For example, to compute P(X(i,t) | y(:,1:T)), we proceed as follows
+(where onodes are the indices of the observed nodes in each slice,
+which correspond to y):
+
+ev = sample_dbn(bnet, T);
+evidence = cell(ss,T);
+evidence(onodes,:) = ev(onodes, :); % all cells besides onodes are empty
+[engine, ll] = enter_evidence(engine, evidence);
+marg = marginal_nodes(engine, i, t);
+
+
+If all the hidden nodes are discrete,
+we can use the junction tree algorithm to perform inference.
+The simplest approach,
+jtree_unrolled_dbn_inf_engine,
+unrolls the DBN into a static network and applies jtree; however, for
+long sequences, this
+can be very slow and can result in numerical underflow.
+A better approach is to apply the jtree algorithm to pairs of
+neighboring slices at a time; this is implemented in
+jtree_dbn_inf_engine.
+
+
+A DBN can be converted to an HMM if all the hidden nodes are discrete.
+In this case, you can use
+hmm_inf_engine. This is faster than jtree for small models
+because the constant factors of the algorithm are lower, but can be
+exponentially slower for models with many variables
+(e.g., > 6 binary hidden nodes).
+
+
+The use of both
+jtree_dbn_inf_engine
+and
+hmm_inf_engine
+is deprecated.
+A better approach is to construct a smoother engine out of lower-level
+engines, which implement forward/backward operators.
+You can create these engines as follows.
+
+Note: you must declare the observed nodes in the bnet before using
+hmm_2TBN_inf_engine.
+
+
+
+Unfortunately, when all the hidden nodes are discrete,
+exact inference takes O(2^n) time, where n is the number of hidden
+nodes per slice,
+even if the model is sparse.
+The basic reason for this is that two nodes become correlated, even if
+there is no direct connection between them in the 2TBN,
+by virtue of sharing common ancestors in the past.
+Hence we need to use approximations.
+
+A popular approximate inference algorithm for discrete DBNs, known as BK, is described in
+
+This approximates the belief state with a product of
+marginals on a specified set of clusters. For example,
+in the water network, we might use the following clusters:
+
+This engine can now be used just like the jtree engine.
+Two special cases of the BK algorithm are supported: 'ff' (fully
+factored) means each node has its own cluster, and 'exact' means there
+is 1 cluster that contains the whole slice. These can be created as
+follows:
+
+
+If all the hidden nodes are linear-Gaussian, and the observed nodes are
+linear-Gaussian,
+the model is a
+linear dynamical system (LDS).
+A DBN can be converted to an LDS if all the hidden nodes are linear-Gaussian
+and if they are all persistent. In this case, you can use
+kalman_inf_engine.
+For more general linear-gaussian models, you can use
+jtree_dbn_inf_engine or jtree_unrolled_dbn_inf_engine.
+
+
+For nonlinear systems with Gaussian noise, the unscented Kalman filter (UKF),
+due to Julier and Uhlmann, is far superior to the well-known extended Kalman
+filter (EKF), both in theory and practice.
+
+The key idea of the UKF is that it is easier to estimate a Gaussian distribution
+from a set of points than to approximate an arbitrary non-linear
+function.
+We start with points that are plus/minus sigma away from the mean along
+each dimension, and then pipe them through the nonlinearity, and
+then fit a Gaussian to the transformed points.
+(No need to compute Jacobians, unlike the EKF!)
+
+
+For systems with non-Gaussian noise, I recommend
+Particle
+filtering (PF), which is a popular sequential Monte Carlo technique.
+
+
+The EKF can be used as a proposal distribution for a PF.
+This method is better than either one alone.
+See The Unscented Particle Filter,
+by R van der Merwe, A Doucet, JFG de Freitas and E Wan, May 2000.
+Matlab
+software for the UPF is also available.
+
+Note: none of this software is part of BNT.
+
+
+
+
+
+Offline parameter learning is very similar to learning in static networks,
+except now the training data is a cell-array of 2D cell-arrays.
+For example,
+cases{l}{i,t} is the value of node i in slice t in sequence l, or []
+if unobserved.
+Each sequence can be a different length, and may have missing values
+in arbitrary locations.
+Here is a typical code fragment for using EM.
+
+
+There is currently only one structure learning algorithm for DBNs.
+This assumes all nodes are tabular and observed, and that there are
+no intra-slice connections. Hence we can find the optimal set of
+parents for each node separately, without worrying about directed
+cycles or node orderings.
+The function is called as follows
+
+A full example is given in BNT/examples/dynamic/reveal1.m.
+Setting the penalty term to 0 gives the maximum likelihood model; this
+is equivalent to maximizing the mutual information between parents and
+child (in the bioinformatics community, this is known as the REVEAL
+algorithm). A non-zero penalty invokes the BIC criterion, which
+lessens the chance of overfitting.
+
+
+Dirk Husmeier has extended MCMC model selection to DBNs.
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/usage_dbn_02nov13.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/usage_dbn_02nov13.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,715 @@
+
+How to use BNT for DBNs
+
+
+
+
+
+Documentation last updated on 13 November 2002
+
+
+
+
+
+
+Dynamic Bayesian Networks (DBNs) are directed graphical models of stochastic
+processes.
+They generalise hidden Markov models (HMMs)
+and linear dynamical systems (LDSs)
+by representing the hidden (and observed) state in terms of state
+variables, which can have complex interdependencies.
+The graphical structure provides an easy way to specify these
+conditional independencies, and hence to provide a compact
+parameterization of the model.
+
+Note that "temporal Bayesian network" would be a better name than
+"dynamic Bayesian network", since
+it is assumed that the model structure does not change, but
+the term DBN has become entrenched.
+We also normally assume that the parameters do not
+change, i.e., the model is time-invariant.
+However, we can always add extra
+hidden nodes to represent the current "regime", thereby creating
+mixtures of models to capture periodic non-stationarities.
+
+There are some cases where the size of the state space can change over
+time, e.g., tracking a variable, but unknown, number of objects.
+In this case, we need to change the model structure over time.
+BNT does not support this.
+
+
+
+
+
+The simplest kind of DBN is a Hidden Markov Model (HMM), which has
+one discrete hidden node and one discrete or continuous
+observed node per slice. We illustrate this below.
+As before, circles denote continuous nodes, squares denote
+discrete nodes, clear means hidden, shaded means observed.
+
+
+
+
+We have "unrolled" the model for three "time slices" -- the structure and parameters are
+assumed to repeat as the model is unrolled further.
+Hence to specify a DBN, we need to
+define the intra-slice topology (within a slice),
+the inter-slice topology (between two slices),
+as well as the parameters for the first two slices.
+(Such a two-slice temporal Bayes net is often called a 2TBN.)
+
+We can specify the topology as follows.
+
+intra = zeros(2);
+intra(1,2) = 1; % node 1 in slice t connects to node 2 in slice t
+
+inter = zeros(2);
+inter(1,1) = 1; % node 1 in slice t-1 connects to node 1 in slice t
+
+We can specify the parameters as follows,
+where for simplicity we assume the observed node is discrete.
+
+We assume the distributions P(X(t) | X(t-1)) and
+P(Y(t) | X(t)) are independent of t for t > 1.
+Hence the CPD for nodes 5, 7, ... is the same as for node 3, so we say they
+are in the same equivalence class, with node 3 being the "representative"
+for this class. In other words, we have tied the parameters for nodes
+3, 5, 7, ...
+Similarly, nodes 4, 6, 8, ... are tied.
+Note, however, that (the parameters for) nodes 1 and 2 are not tied to
+subsequent slices.
+
+Above we assumed the observation model P(Y(t) | X(t)) is independent of t for t>1, but
+it is conventional to assume this is true for all t.
+So we would like to put nodes 2, 4, 6, ... all in the same class.
+We can do this by explicitly defining the equivalence classes, as
+follows (see here for more details on
+parameter tying).
+
+We define eclass1(i) to be the equivalence class that node i in slice
+1 belongs to.
+Similarly, we define eclass2(i) to be the equivalence class that node i in slice
+2, 3, ..., belongs to.
+For an HMM, we have
+
+This ties the observation model across slices,
+since e.g., eclass(4) = eclass(2) = 2.
+
+By default,
+eclass1 = 1:ss, and eclass2 = (1:ss)+ss, where ss = slice size = the
+number of nodes per slice.
+
+But by using the above tying pattern,
+we now only have 3 CPDs to specify, instead of 4:
+
+
+Consider the following model
+of a water purification plant, developed
+by Finn V. Jensen, Uffe Kjærulff, Kristian G. Olesen, and Jan
+Pedersen.
+
+
+
+
+
+As an example of a more complicated DBN, consider the following
+example,
+which is a model of a car's high level state, as might be used by
+an automated car.
+(The model is from Forbes, Huang, Kanazawa and Russell, "The BATmobile: Towards a
+Bayesian Automated Taxi", IJCAI 95. The figure is from
+Boyen and Koller, "Tractable Inference for Complex Stochastic
+Processes", UAI98.
+For simplicity, we only show the observed nodes for slice 2.)
+
+
+
+
+
+Since this topology is so complicated,
+it is useful to be able to refer to the nodes by name, instead of
+number.
+
+Finally, we can convert this cell array to an adjacency matrix using
+the following function:
+
+[intra, names] = mk_adj_mat(intrac, names, 1);
+
+This function also permutes the names so that they are in topological
+order.
+Given this ordering of the names, we can make the inter-slice
+connectivity matrix as follows:
+
+To specify the parameters, we must know the order of the parents.
+See the function BNT/general/mk_named_CPT for a way to do this in the
+case of tabular nodes. For simplicity, we just generate random
+parameters:
+
+
+
+The general inference problem for DBNs is to compute
+P(X(i,t0) | Y(:, t1:t2)), where X(i,t) represents the i'th hidden
+variable at time t and Y(:,t1:t2) represents all the evidence
+between times t1 and t2.
+There are several special cases of interest, illustrated below.
+The arrow indicates t0: it is X(t0) that we are trying to estimate.
+The shaded region denotes t1:t2, the available data.
+
+
+
+
+
+BNT can currently only handle offline smoothing.
+(The HMM engine handles filtering and, to a limited extent, prediction.)
+The usage is similar to static
+inference engines, except now the evidence is a 2D cell array of
+size ss*T, where ss is the number of nodes per slice (ss = slice size) and T is the
+number of slices.
+Also, 'marginal_nodes' takes two arguments, the nodes and the time-slice.
+For example, to compute P(X(i,t) | y(:,1:T)), we proceed as follows
+(where onodes are the indices of the observed nodes in each slice,
+which correspond to y):
+
+ev = sample_dbn(bnet, T);
+evidence = cell(ss,T);
+evidence(onodes,:) = ev(onodes, :); % all cells besides onodes are empty
+[engine, ll] = enter_evidence(engine, evidence);
+marg = marginal_nodes(engine, i, t);
+
+
+If all the hidden nodes are discrete,
+we can use the junction tree algorithm to perform inference.
+The simplest approach,
+jtree_unrolled_dbn_inf_engine,
+unrolls the DBN into a static network and applies jtree; however, for
+long sequences, this
+can be very slow and can result in numerical underflow.
+A better approach is to apply the jtree algorithm to pairs of
+neighboring slices at a time; this is implemented in
+jtree_dbn_inf_engine.
+
+
+A DBN can be converted to an HMM if all the hidden nodes are discrete.
+In this case, you can use
+hmm_inf_engine. This is faster than jtree for small models
+because the constant factors of the algorithm are lower, but can be
+exponentially slower for models with many variables
+(e.g., > 6 binary hidden nodes).
+
+
+The use of both
+jtree_dbn_inf_engine
+and
+hmm_inf_engine
+is deprecated.
+A better approach is to construct a smoother engine out of lower-level
+engines, which implement forward/backward operators.
+You can create these engines as follows.
+
+Note: you must declare the observed nodes in the bnet before using
+hmm_2TBN_inf_engine.
+
+
+
+Unfortunately, when all the hidden nodes are discrete,
+exact inference takes O(2^n) time, where n is the number of hidden
+nodes per slice,
+even if the model is sparse.
+The basic reason for this is that two nodes become correlated, even if
+there is no direct connection between them in the 2TBN,
+by virtue of sharing common ancestors in the past.
+Hence we need to use approximations.
+
+A popular approximate inference algorithm for discrete DBNs, known as BK, is described in
+
+This approximates the belief state with a product of
+marginals on a specified set of clusters. For example,
+in the water network, we might use the following clusters:
+
+This engine can now be used just like the jtree engine.
+Two special cases of the BK algorithm are supported: 'ff' (fully
+factored) means each node has its own cluster, and 'exact' means there
+is 1 cluster that contains the whole slice. These can be created as
+follows:
+
+
+If all the hidden nodes are linear-Gaussian, and the observed nodes are
+linear-Gaussian,
+the model is a
+linear dynamical system (LDS).
+A DBN can be converted to an LDS if all the hidden nodes are linear-Gaussian
+and if they are all persistent. In this case, you can use
+kalman_inf_engine.
+For more general linear-gaussian models, you can use
+jtree_dbn_inf_engine or jtree_unrolled_dbn_inf_engine.
+
+
+For nonlinear systems with Gaussian noise, the unscented Kalman filter (UKF),
+due to Julier and Uhlmann, is far superior to the well-known extended Kalman
+filter (EKF), both in theory and practice.
+
+The key idea of the UKF is that it is easier to estimate a Gaussian distribution
+from a set of points than to approximate an arbitrary non-linear
+function.
+We start with points that are plus/minus sigma away from the mean along
+each dimension, and then pipe them through the nonlinearity, and
+then fit a Gaussian to the transformed points.
+(No need to compute Jacobians, unlike the EKF!)
+
+
+For systems with non-Gaussian noise, I recommend
+Particle
+filtering (PF), which is a popular sequential Monte Carlo technique.
+
+
+The EKF can be used as a proposal distribution for a PF.
+This method is better than either one alone.
+See The Unscented Particle Filter,
+by R van der Merwe, A Doucet, JFG de Freitas and E Wan, May 2000.
+Matlab
+software for the UPF is also available.
+
+Note: none of this software is part of BNT.
+
+
+
+
+
+Offline parameter learning is very similar to learning in static networks,
+except now the training data is a cell-array of 2D cell-arrays.
+For example,
+cases{l}{i,t} is the value of node i in slice t in sequence l, or []
+if unobserved.
+Each sequence can be a different length, and may have missing values
+in arbitrary locations.
+Here is a typical code fragment for using EM.
+
+
+There is currently only one structure learning algorithm for DBNs.
+This assumes all nodes are tabular and observed, and that there are
+no intra-slice connections. Hence we can find the optimal set of
+parents for each node separately, without worrying about directed
+cycles or node orderings.
+The function is called as follows
+
+A full example is given in BNT/examples/dynamic/reveal1.m.
+Setting the penalty term to 0 gives the maximum likelihood model; this
+is equivalent to maximizing the mutual information between parents and
+child (in the bioinformatics community, this is known as the REVEAL
+algorithm). A non-zero penalty invokes the BIC criterion, which
+lessens the chance of overfitting.
+
+
+Dirk Husmeier has extended MCMC model selection to DBNs.
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/docs/usage_sf.html
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/docs/usage_sf.html Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,3242 @@
+
+How to use the Bayes Net Toolbox
+
+
+
+
+
+
How to use the Bayes Net Toolbox
+
+This documentation was last updated on 7 June 2004.
+
+Click here for a list of changes made to
+BNT.
+
+Click
+here
+for a French version of this documentation (which might not
+be up-to-date).
+
+Update 23 May 2005:
+Philippe LeRay has written
+a
+
+BNT GUI
+and
+
+BNT Structure Learning Package.
+
+
Unpack the file. In Unix, type
+
+"unzip FullBNT.zip".
+In Windows, use
+a program like Winzip. This will
+create a directory called FullBNT, which contains BNT and other libraries.
+(Files ending in ~ or # are emacs backup files, and can be ignored.)
+
+
+
Read the file BNT/README.txt to make sure the date
+matches the one on the top of the BNT home page.
+If not, you may need to press 'refresh' on your browser, and download
+again, to get the most recent version.
+
+
+
Edit the file "FullBNT/BNT/add_BNT_to_path.m" so it contains the correct
+pathname.
+For example, in Windows,
+I download FullBNT.zip into C:\kmurphy\matlab, and
+then ensure the second lines reads
+
+BNT_HOME = 'C:\kmurphy\matlab\FullBNT';
+
+
+
+
Start up Matlab.
+
+
+
Type "ver" at the Matlab prompt (">>").
+You need Matlab version 5.2 or newer to run BNT.
+(Versions 5.0 and 5.1 have a memory leak which seems to sometimes
+crash BNT.)
+BNT will not run on Octave.
+
+
+
Move to the BNT directory.
+For example, in Windows, I type
+
+>> cd C:\kpmurphy\matlab\FullBNT\BNT
+
+
+
+
Type "add_BNT_to_path".
+This executes the command
+addpath(genpath(BNT_HOME)),
+which adds all directories below FullBNT to the matlab path.
+
+
+
Type "test_BNT".
+
+If all goes well, this will produce a bunch of numbers and maybe some
+warning messages (which you can ignore), but no error messages.
+(The warnings should only be of the form
+"Warning: Maximum number of iterations has been exceeded", and are
+produced by Netlab.)
+
+
+
Problems? Did you remember to
+Edit the file "FullBNT/BNT/add_BNT_to_path.m" so it contains
+the right path??
+
+
+
+Some BNT functions also have C implementations.
+It is not necessary to install the C code, but it can result in a speedup
+of a factor of 2-5.
+To install all the C code,
+edit installC_BNT.m so it contains the right path,
+then type installC_BNT.
+(Ignore warnings of the form 'invalid white space character in directive'.)
+To uninstall all the C code,
+edit uninstallC_BNT.m so it contains the right path,
+then type uninstallC_BNT.
+For an up-to-date list of the files which have C implementations, see
+BNT/installC_BNT.m.
+
+
+mex is a script that lets you call C code from Matlab - it does not compile matlab to
+C (see mcc below).
+If your C/C++ compiler is set up correctly, mex should work out of
+the box.
+If not, you might need to type
+
+In general, typing
+'mex foo.c' from inside Matlab creates a file called
+'foo.mexglx' or 'foo.dll' (the exact file
+extension is system dependent - on Linux it is 'mexglx', on Windows it is '.dll').
+The resulting file will hide the original 'foo.m' (if it existed), i.e.,
+typing 'foo' at the prompt will call the compiled C version.
+To reveal the original matlab version, just delete foo.mexglx (this is
+what uninstallC does).
+
+Sometimes it takes time for Matlab to realize that the file has
+changed from matlab to C or vice versa; try typing 'clear all' or
+restarting Matlab to refresh it.
+To find out which version of a file you are running, type
+'which foo'.
+
+mcc, the
+Matlab to C compiler, is a separate product,
+and is quite different from mex. It does not yet support
+objects/classes, which is why we can't compile all of BNT to C automatically.
+Also, hand-written C code is usually much
+better than the C code generated by mcc.
+
+
+
+Acknowledgements:
+Most of the C code (e.g., for jtree and dpot) was written by Wei Hu;
+the triangulation C code was written by Ilya Shpitser;
+the Gibbs sampling C code (for discrete nodes) was written by Bhaskara
+Marthi.
+
+
+
+
+
+To define a Bayes net, you must specify the graph structure and then
+the parameters. We look at each in turn, using a simple example
+(adapted from Russell and
+Norvig, "Artificial Intelligence: a Modern Approach", Prentice Hall,
+1995, p454).
+
+
+
Graph structure
+
+
+Consider the following network.
+
+
+
+
+
+
+
+
+To specify this directed acyclic graph (dag), we create an adjacency matrix:
+
+N = 4;
+dag = zeros(N,N);
+C = 1; S = 2; R = 3; W = 4;
+dag(C,[R S]) = 1;
+dag(R,W) = 1;
+dag(S,W)=1;
+
+
+We have numbered the nodes as follows:
+Cloudy = 1, Sprinkler = 2, Rain = 3, WetGrass = 4.
+The nodes must always be numbered in topological order, i.e.,
+ancestors before descendants.
+For a more complicated graph, this is a little inconvenient: we will
+see how to get around this below.
+
+In Matlab 6, you can use logical arrays instead of double arrays,
+which are 4 times smaller:
+
+dag = false(N,N);
+dag(C,[R S]) = true;
+...
+
+However, some graph functions (eg acyclic) do not work on
+logical arrays!
+
+A preliminary attempt to make a GUI
+has been written by Philippe LeRay and can be downloaded
+from here.
+
+You can visualize the resulting graph structure using
+the methods discussed below.
+
+
Creating the Bayes net shell
+
+In addition to specifying the graph structure,
+we must specify the size and type of each node.
+If a node is discrete, its size is the
+number of possible values
+each node can take on; if a node is continuous,
+it can be a vector, and its size is the length of this vector.
+In this case, we will assume all nodes are discrete and binary.
+
+If the nodes were not binary, you could type e.g.,
+
+node_sizes = [4 2 3 5];
+
+meaning that Cloudy has 4 possible values,
+Sprinkler has 2 possible values, etc.
+Note that these are cardinal values, not ordinal, i.e.,
+they are not ordered in any way, like 'low', 'medium', 'high'.
+
+Note that optional arguments are specified using a name/value syntax.
+This is common for many BNT functions.
+In general, to find out more about a function (e.g., which optional
+arguments it takes), please see its
+documentation string by typing
+
+
+A model consists of the graph structure and the parameters.
+The parameters are represented by CPD objects (CPD = Conditional
+Probability Distribution), which define the probability distribution
+of a node given its parents.
+(We will use the terms "node" and "random variable" interchangeably.)
+The simplest kind of CPD is a table (multi-dimensional array), which
+is suitable when all the nodes are discrete-valued. Note that the discrete
+values are not assumed to be ordered in any way; that is, they
+represent categorical quantities, like male and female, rather than
+ordinal quantities, like low, medium and high.
+(We will discuss CPDs in more detail below.)
+
+Tabular CPDs, also called CPTs (conditional probability tables),
+are stored as multidimensional arrays, where the dimensions
+are arranged in the same order as the nodes, e.g., the CPT for node 4
+(WetGrass) is indexed by Sprinkler (2), Rain (3) and then WetGrass (4) itself.
+Hence the child is always the last dimension.
+If a node has no parents, its CPT is a column vector representing its
+prior.
+Note that in Matlab (unlike C), arrays are indexed
+from 1, and are layed out in memory such that the first index toggles
+fastest, e.g., the CPT for node 4 (WetGrass) is as follows
+
+
+
+where we have used the convention that false==1, true==2.
+We can create this CPT in Matlab as follows
+
+
+If we do not specify the CPT, random parameters will be
+created, i.e., each "row" of the CPT will be drawn from the uniform distribution.
+To ensure repeatable results, use
+
+rand('state', seed);
+randn('state', seed);
+
+To control the degree of randomness (entropy),
+you can sample each row of the CPT from a Dirichlet(p,p,...) distribution.
+If p << 1, this encourages "deterministic" CPTs (one entry near 1, the rest near 0).
+If p = 1, each entry is drawn from U[0,1].
+If p >> 1, the entries will all be near 1/k, where k is the arity of
+this node, i.e., each row will be nearly uniform.
+You can do this as follows, assuming this node
+is number i, and ns is the node_sizes.
+
+It is currently not possible to save/load a BNT matlab object to
+file, but this is easily fixed if you modify all the constructors
+for all the classes (see matlab documentation).
+
+
+
+Having created the BN, we can now use it for inference.
+There are many different algorithms for doing inference in Bayes nets,
+that make different tradeoffs between speed,
+complexity, generality, and accuracy.
+BNT therefore offers a variety of different inference
+"engines". We will discuss these
+in more detail below.
+For now, we will use the junction tree
+engine, which is the mother of all exact inference algorithms.
+This can be created as follows.
+
+engine = jtree_inf_engine(bnet);
+
+The other engines have similar constructors, but might take
+additional, algorithm-specific parameters.
+All engines are used in the same way, once they have been created.
+We illustrate this in the following sections.
+
+
+
+
+Suppose we want to compute the probability that the sprinker was on
+given that the grass is wet.
+The evidence consists of the fact that W=2. All the other nodes
+are hidden (unobserved). We can specify this as follows.
+
+evidence = cell(1,N);
+evidence{W} = 2;
+
+We use a 1D cell array instead of a vector to
+cope with the fact that nodes can be vectors of different lengths.
+In addition, the value [] can be used
+to denote 'no evidence', instead of having to specify the observation
+pattern as a separate argument.
+(Click here for a quick tutorial on cell
+arrays in matlab.)
+
+We are now ready to add the evidence to the engine.
+
+The behavior of this function is algorithm-specific, and is discussed
+in more detail below.
+In the case of the jtree engine,
+enter_evidence implements a two-pass message-passing scheme.
+The first return argument contains the modified engine, which
+incorporates the evidence. The second return argument contains the
+log-likelihood of the evidence. (Not all engines are capable of
+computing the log-likelihood.)
+
+Finally, we can compute p=P(S=2|W=2) as follows.
+
+
+What happens if we ask for the marginal on an observed node, e.g. P(W|W=2)?
+An observed discrete node effectively only has 1 value (the observed
+ one) --- all other values would result in 0 probability.
+For efficiency, BNT treats observed (discrete) nodes as if they were
+ set to 1, as we see below:
+
+m is a structure. The 'T' field is a multi-dimensional array (in
+this case, 3-dimensional) that contains the joint probability
+distribution on the specified nodes.
+
+The joint T(i,j,k) = P(S=i,R=j,W=k|evidence)
+should have T(i,1,k) = 0 for all i,k, since R=1 is incompatible
+with the evidence that R=2.
+Instead of creating large tables with many 0s, BNT sets the effective
+size of observed (discrete) nodes to 1, as explained above.
+This is why m.T has size 2x1x2.
+To get a 2x2x2 table, type
+
+Note: It is not always possible to compute the joint on arbitrary
+sets of nodes: it depends on which inference engine you use, as discussed
+in more detail below.
+
+
+
+
+Sometimes a node is not observed, but we have some distribution over
+its possible values; this is often called "soft" or "virtual"
+evidence.
+One can use this as follows
+
+where soft_evidence{i} is either [] (if node i has no soft evidence)
+or is a vector representing the probability distribution over i's
+possible values.
+For example, if we don't know i's exact value, but we know its
+likelihood ratio is 60/40, we can write evidence{i} = [] and
+soft_evidence{i} = [0.6 0.4].
+
+Currently only jtree_inf_engine supports this option.
+It assumes that all hidden nodes, and all nodes for
+which we have soft evidence, are discrete.
+For a longer example, see BNT/examples/static/softev1.m.
+
+
+
+
+To compute the most probable explanation (MPE) of the evidence (i.e.,
+the most probable assignment, or a mode of the joint), use
+
+[mpe, ll] = calc_mpe(engine, evidence);
+
+mpe{i} is the most likely value of node i.
+This calls enter_evidence with the 'maximize' flag set to 1, which
+causes the engine to do max-product instead of sum-product.
+The resulting max-marginals are then thresholded.
+If there is more than one maximum probability assignment, we must take
+ care to break ties in a consistent manner (thresholding the
+ max-marginals may give the wrong result). To force this behavior,
+ type
+
+[mpe, ll] = calc_mpe(engine, evidence, 1);
+
+Note that computing the MPE is sometimes called abductive reasoning.
+
+
+You can also use calc_mpe_bucket written by Ron Zohar,
+that does a forwards max-product pass, and then a backwards traceback
+pass, which is how Viterbi is traditionally implemented.
+
+
+
+
+
+A Conditional Probability Distributions (CPD)
+defines P(X(i) | X(Pa(i))), where X(i) is the i'th node, and X(Pa(i))
+are the parents of node i. There are many ways to represent this
+distribution, which depend in part on whether X(i) and X(Pa(i)) are
+discrete, continuous, or a combination.
+We will discuss various representations below.
+
+
+
+
+If the CPD is represented as a table (i.e., if it is a multinomial
+distribution), it has a number of parameters that is exponential in
+the number of parents. See the example above.
+
+
+
+
+A noisy-OR node is like a regular logical OR gate except that
+sometimes the effects of parents that are on get inhibited.
+Let the prob. that parent i gets inhibited be q(i).
+Then a node, C, with 2 parents, A and B, has the following CPD, where
+we use F and T to represent off and on (1 and 2 in BNT).
+
+A B P(C=off) P(C=on)
+---------------------------
+F F 1.0 0.0
+T F q(A) 1-q(A)
+F T q(B) 1-q(B)
+T T q(A)q(B) 1-q(A)q(B)
+
+Thus we see that the causes get inhibited independently.
+It is common to associate a "leak" node with a noisy-or CPD, which is
+like a parent that is always on. This can account for all other unmodelled
+causes which might turn the node on.
+
+The noisy-or distribution is similar to the logistic distribution.
+To see this, let the nodes, S(i), have values in {0,1}, and let q(i,j)
+be the prob. that j inhibits i. Then
+
+where sigma(x) = 1/(1+exp(-x)). Hence they differ in the choice of
+the activation function (although both are monotonically increasing).
+In addition, in the case of a noisy-or, the weights are constrained to be
+positive, since they derive from probabilities q(i,j).
+In both cases, the number of parameters is linear in the
+number of parents, unlike the case of a multinomial distribution,
+where the number of parameters is exponential in the number of parents.
+We will see an example of noisy-OR nodes below.
+
+
+
+
+Deterministic CPDs for discrete random variables can be created using
+the deterministic_CPD class. It is also possible to 'flip' the output
+of the function with some probability, to simulate noise.
+The boolean_CPD class is just a special case of a
+deterministic CPD, where the parents and child are all binary.
+
+Both of these classes are just "syntactic sugar" for the tabular_CPD
+class.
+
+
+
+
+
+If we have a discrete node with a continuous parent,
+we can define its CPD using a softmax function
+(also known as the multinomial logit function).
+This acts like a soft thresholding operator, and is defined as follows:
+
+The parameters of a softmax node, w(:,i) and b(i), i=1..|Q|, have the
+following interpretation: w(:,i)-w(:,j) is the normal vector to the
+decision boundary between classes i and j,
+and b(i)-b(j) is its offset (bias). For example, suppose
+X is a 2-vector, and Q is binary. Then
+
+w = [1 -1;
+ 0 0];
+
+b = [0 0];
+
+means class 1 are points in the 2D plane with positive x coordinate,
+and class 2 are points in the 2D plane with negative x coordinate.
+If w has large magnitude, the decision boundary is sharp, otherwise it
+is soft.
+In the special case that Q is binary (0/1), the softmax function reduces to the logistic
+(sigmoid) function.
+
+Fitting a softmax function can be done using the iteratively reweighted
+least squares (IRLS) algorithm.
+We use the implementation from
+Netlab.
+Note that since
+the softmax distribution is not in the exponential family, it does not
+have finite sufficient statistics, and hence we must store all the
+training data in uncompressed form.
+If this takes too much space, one should use online (stochastic) gradient
+descent (not implemented in BNT).
+
+If a softmax node also has discrete parents,
+we use a different set of w/b parameters for each combination of
+parent values, as in the conditional linear
+Gaussian CPD.
+This feature was implemented by Pierpaolo Brutti.
+He is currently extending it so that discrete parents can be treated
+as if they were continuous, by adding indicator variables to the X
+vector.
+
+We will see an example of softmax nodes below.
+
+
+
+
+Pierpaolo Brutti has implemented the mlp_CPD class, which uses a multi layer perceptron
+to implement a mapping from continuous parents to discrete children,
+similar to the softmax function.
+(If there are also discrete parents, it creates a mixture of MLPs.)
+It uses code from Netlab.
+This is work in progress.
+
+
+
+A root node has no parents and no parameters; it can be used to model
+an observed, exogenous input variable, i.e., one which is "outside"
+the model.
+This is useful for conditional density models.
+We will see an example of root nodes below.
+
+
+
+
+We now consider a distribution suitable for the continuous-valued nodes.
+Suppose the node is called Y, its continuous parents (if any) are
+called X, and its discrete parents (if any) are called Q.
+The distribution on Y is defined as follows:
+
+- no parents: Y ~ N(mu, Sigma)
+- cts parents : Y|X=x ~ N(mu + W x, Sigma)
+- discrete parents: Y|Q=i ~ N(mu(:,i), Sigma(:,:,i))
+- cts and discrete parents: Y|X=x,Q=i ~ N(mu(:,i) + W(:,:,i) * x, Sigma(:,:,i))
+
+where N(mu, Sigma) denotes a Normal distribution with mean mu and
+covariance Sigma. Let |X|, |Y| and |Q| denote the sizes of X, Y and Q
+respectively.
+If there are no discrete parents, |Q|=1; if there is
+more than one, then |Q| = a vector of the sizes of each discrete parent.
+If there are no continuous parents, |X|=0; if there is more than one,
+then |X| = the sum of their sizes.
+Then mu is a |Y|*|Q| vector, Sigma is a |Y|*|Y|*|Q| positive
+semi-definite matrix, and W is a |Y|*|X|*|Q| regression (weight)
+matrix.
+
+We can create a Gaussian node with random parameters as follows.
+
+bnet.CPD{i} = gaussian_CPD(bnet, i);
+
+We can specify the value of one or more of the parameters as in the
+following example, in which |Y|=2, and |Q|=1.
+
+We will see an example of conditional linear Gaussian nodes below.
+
+When learning Gaussians from data, it is helpful to ensure the
+data has a small magnitude
+(see e.g., KPMstats/standardize) to prevent numerical problems.
+Unless you have a lot of data, it is also a very good idea to use
+diagonal instead of full covariance matrices.
+(BNT does not currently support spherical covariances, although it
+would be easy to add, since KPMstats/clg_Mstep supports this option;
+you would just need to modify gaussian_CPD/update_ess to accumulate
+weighted inner products.)
+
+
+
+
+
+Currently BNT does not support any CPDs for continuous nodes other
+than the Gaussian.
+However, you can use a mixture of Gaussians to
+approximate other continuous distributions. We will see some an example
+of this with the IFA model below.
+
+
+
+
+We plan to add classification and regression trees to define CPDs for
+discrete and continuous nodes, respectively.
+Trees have many advantages: they are easy to interpret, they can do
+feature selection, they can
+handle discrete and continuous inputs, they do not make strong
+assumptions about the form of the distribution, the number of
+parameters can grow in a data-dependent way (i.e., they are
+semi-parametric), they can handle missing data, etc.
+However, they are not yet implemented.
+
+
+
+
+
+We list all the different types of CPDs supported by BNT.
+For each CPD, we specify if the child and parents can be discrete (D) or
+continuous (C) (Binary (B) nodes are a special case).
+We also specify which methods each class supports.
+If a method is inherited, the name of the parent class is mentioned.
+If a parent class calls a child method, this is mentioned.
+
+The CPD_to_CPT method converts a CPD to a table; this
+requires that the child and all parents are discrete.
+The CPT might be exponentially big...
+convert_to_table evaluates a CPD with evidence, and
+represents the resulting potential as an array.
+This requires that the child is discrete, and any continuous parents
+are observed.
+convert_to_pot evaluates a CPD with evidence, and
+represents the resulting potential as a dpot, gpot, cgpot or upot, as
+requested. (d=discrete, g=Gaussian, cg = conditional Gaussian, u =
+utility).
+
+
+When we sample a node, all the parents are observed.
+When we compute the (log) probability of a node, all the parents and
+the child are observed.
+
+We also specify if the parameters are learnable.
+For learning with EM, we require
+the methods reset_ess, update_ess and
+maximize_params.
+For learning from fully observed data, we require
+the method learn_params.
+By default, all classes inherit this from generic_CPD, which simply
+calls update_ess N times, once for each data case, followed
+by maximize_params, i.e., it is like EM, without the E step.
+Some classes implement a batch formula, which is quicker.
+
+Bayesian learning means computing a posterior over the parameters
+given fully observed data.
+
+Pearl means we implement the methods compute_pi and
+compute_lambda_msg, used by
+pearl_inf_engine, which runs on directed graphs.
+belprop_inf_engine only needs convert_to_pot.
+The pearl methods can exploit special properties of the CPDs for
+computing the messages efficiently, whereas belprop does not.
+
+The only method implemented by generic_CPD is adjustable_CPD,
+which is not shown, since it is not very interesting.
+
+
+
+
+In Figure (a) below, we show how Factor Analysis can be thought of as a
+graphical model. Here, X has an N(0,I) prior, and
+Y|X=x ~ N(mu + Wx, Psi),
+where Psi is diagonal and W is called the "factor loading matrix".
+Since the noise on both X and Y is diagonal, the components of these
+vectors are uncorrelated, and hence can be represented as individual
+scalar nodes, as we show in (b).
+(This is useful if parts of the observations on the Y vector are occasionally missing.)
+We usually take k=|X| << |Y|=D, so the model tries to explain
+many observations using a low-dimensional subspace.
+
+
+
+
+The root node is clamped to the N(0,I) distribution, so that we will
+not update these parameters during learning.
+The mean of the leaf node is clamped to 0,
+since we assume the data has been centered (had its mean subtracted
+off); this is just for simplicity.
+Finally, the covariance of the leaf node is constrained to be
+diagonal. W0 and Psi0 are the initial parameter guesses.
+
+
+We can fit this model (i.e., estimate its parameters in a maximum
+likelihood (ML) sense) using EM, as we
+explain below.
+Not surprisingly, the ML estimates for mu and Psi turn out to be
+identical to the
+sample mean and variance, which can be computed directly as
+
+mu_ML = mean(data);
+Psi_ML = diag(cov(data));
+
+Note that W can only be identified up to a rotation matrix, because of
+the spherical symmetry of the source.
+
+
+If we restrict Psi to be spherical, i.e., Psi = sigma*I,
+there is a closed-form solution for W as well,
+i.e., we do not need to use EM.
+In particular, W contains the first |X| eigenvectors of the sample covariance
+matrix, with scalings determined by the eigenvalues and sigma.
+Classical PCA can be obtained by taking the sigma->0 limit.
+For details, see
+
+
+By adding a hidden discrete variable, we can create mixtures of FA
+models, as shown in (c).
+Now we can explain the data using a set of subspaces.
+We can create this model in BNT as follows.
+
+Notice how the covariance matrix for Y is the same for all values of
+Q; that is, the noise level in each sub-space is assumed the same.
+However, we allow the offset, mu, to vary.
+For details, see
+
+I have included Zoubin's specialized MFA code (with his permission)
+with the toolbox, so you can check that BNT gives the same results:
+see 'BNT/examples/static/mfa1.m'.
+
+
+Independent Factor Analysis (IFA) generalizes FA by allowing a
+non-Gaussian prior on each component of X.
+(Note that we can approximate a non-Gaussian prior using a mixture of
+Gaussians.)
+This means that the likelihood function is no longer rotationally
+invariant, so we can uniquely identify W and the hidden
+sources X.
+IFA also allows a non-diagonal Psi (i.e. correlations between the components of Y).
+We recover classical Independent Components Analysis (ICA)
+in the Psi -> 0 limit, and by assuming that |X|=|Y|, so that the
+weight matrix W is square and invertible.
+For details, see
+
+
+As an example of the use of the softmax function,
+we introduce the Mixture of Experts model.
+
+As before,
+circles denote continuous-valued nodes,
+squares denote discrete nodes, clear
+means hidden, and shaded means observed.
+
+
+
+
+
+
+
+
+
+X is the observed
+input, Y is the output, and
+the Q nodes are hidden "gating" nodes, which select the appropriate
+set of parameters for Y. During training, Y is assumed observed,
+but for testing, the goal is to predict Y given X.
+Note that this is a conditional density model, so we don't
+associate any parameters with X.
+Hence X's CPD will be a root CPD, which is a way of modelling
+exogenous nodes.
+If the output is a continuous-valued quantity,
+we assume the "experts" are linear-regression units,
+and set Y's CPD to linear-Gaussian.
+If the output is discrete, we set Y's CPD to a softmax function.
+The Q CPDs will always be softmax functions.
+
+
+As a concrete example, consider the mixture of experts model where X and Y are
+scalars, and Q is binary.
+This is just piecewise linear regression, where
+we have two line segments, i.e.,
+
+
+
+We can create this model with random parameters as follows.
+(This code is bundled in BNT/examples/static/mixexp2.m.)
+
+This is what the model looks like before training.
+(Thanks to Thomas Hofman for writing this plotting routine.)
+
+
+
+
+
+Now let's train the model, and plot the final performance.
+(We will discuss how to train models in more detail below.)
+
+
+ncases = size(data, 1); % each row of data is a training case
+cases = cell(3, ncases);
+cases([1 3], :) = num2cell(data'); % each column of cases is a training case
+engine = jtree_inf_engine(bnet);
+max_iter = 20;
+[bnet2, LLtrace] = learn_params_em(engine, cases, max_iter);
+
+(We specify which nodes will be observed when we create the engine.
+Hence BNT knows that the hidden nodes are all discrete.
+For complex models, this can lead to a significant speedup.)
+Below we show what the model looks like after 16 iterations of EM
+(with 100 IRLS iterations per M step), when it converged
+using the default convergence tolerance (that the
+fractional change in the log-likelihood be less than 1e-3).
+Before learning, the log-likelihood was
+-322.927442; afterwards, it was -13.728778.
+
+
+
+
+(See BNT/examples/static/mixexp2.m for details of the code.)
+
+
+
+
+
+A hierarchical mixture of experts (HME) extends the mixture of experts
+model by having more than one hidden node. A two-level example is shown below, along
+with its more traditional representation as a neural network.
+This is like a (balanced) probabilistic decision tree of height 2.
+
+
+
+
+
+Pierpaolo Brutti
+has written an extensive set of routines for HMEs,
+which are bundled with BNT: see the examples/static/HME directory.
+These routines allow you to choose the number of hidden (gating)
+layers, and the form of the experts (softmax or MLP).
+See the file hmemenu, which provides a demo.
+For example, the figure below shows the decision boundaries learned
+for a ternary classification problem, using a 2 level HME with softmax
+gates and softmax experts; the training set is on the left, the
+testing set on the right.
+
"Generalized Linear Models", McCullagh and Nelder, Chapman and
+Hall, 1983.
+
+
+"Improved learning algorithms for mixtures of experts in multiclass
+classification".
+K. Chen, L. Xu, H. Chi.
+Neural Networks (1999) 12: 1229-1252.
+
+
+
+Bayes nets originally arose out of an attempt to add probabilities to
+expert systems, and this is still the most common use for BNs.
+A famous example is
+QMR-DT, a decision-theoretic reformulation of the Quick Medical
+Reference (QMR) model.
+
+
+
+
+Here, the top layer represents hidden disease nodes, and the bottom
+layer represents observed symptom nodes.
+The goal is to infer the posterior probability of each disease given
+all the symptoms (which can be present, absent or unknown).
+Each node in the top layer has a Bernoulli prior (with a low prior
+probability that the disease is present).
+Since each node in the bottom layer has a high fan-in, we use a
+noisy-OR parameterization; each disease has an independent chance of
+causing each symptom.
+The real QMR-DT model is copyright, but
+we can create a random QMR-like model as follows.
+
+function bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+% MK_QMR_BNET Make a QMR model
+% bnet = mk_qmr_bnet(G, inhibit, leak, prior)
+%
+% G(i,j) = 1 iff there is an arc from disease i to finding j
+% inhibit(i,j) = inhibition probability on i->j arc
+% leak(j) = inhibition prob. on leak->j arc
+% prior(i) = prob. disease i is on
+
+[Ndiseases Nfindings] = size(inhibit);
+N = Ndiseases + Nfindings;
+finding_node = Ndiseases+1:N;
+ns = 2*ones(1,N);
+dag = zeros(N,N);
+dag(1:Ndiseases, finding_node) = G;
+bnet = mk_bnet(dag, ns, 'observed', finding_node);
+
+for d=1:Ndiseases
+ CPT = [1-prior(d) prior(d)];
+ bnet.CPD{d} = tabular_CPD(bnet, d, CPT');
+end
+
+for i=1:Nfindings
+ fnode = finding_node(i);
+ ps = parents(G, i);
+ bnet.CPD{fnode} = noisyor_CPD(bnet, fnode, leak(i), inhibit(ps, i));
+end
+
+In the file BNT/examples/static/qmr1, we create a random bipartite
+graph G, with 5 diseases and 10 findings, and random parameters.
+(In general, to create a random dag, use 'mk_random_dag'.)
+We can visualize the resulting graph structure using
+the methods discussed below, with the
+following results:
+
+
+
+
+Now let us put some random evidence on all the leaves except the very
+first and very last, and compute the disease posteriors.
+
+Junction tree can be quite slow on large QMR models.
+Fortunately, it is possible to exploit properties of the noisy-OR
+function to speed up exact inference using an algorithm called
+quickscore, discussed below.
+
+
+
+
+
+
+
+A conditional Gaussian model is one in which, conditioned on all the discrete
+nodes, the distribution over the remaining (continuous) nodes is
+multivariate Gaussian. This means we can have arcs from discrete (D)
+to continuous (C) nodes, but not vice versa.
+(We are allowed C->D arcs if the continuous nodes are observed,
+as in the mixture of experts model,
+since this distribution can be represented with a discrete potential.)
+
+We now give an example of a CG model, from
+the paper "Propagation of Probabilities, Means and
+Variances in Mixed Graphical Association Models", Steffen Lauritzen,
+JASA 87(420):1098--1108, 1992 (reprinted in the book "Probabilistic Networks and Expert
+Systems", R. G. Cowell, A. P. Dawid, S. L. Lauritzen and
+D. J. Spiegelhalter, Springer, 1999.)
+
+
Specifying the graph
+
+Consider the model of waste emissions from an incinerator plant shown below.
+We follow the standard convention that shaded nodes are observed,
+clear nodes are hidden.
+We also use the non-standard convention that
+square nodes are discrete (tabular) and round nodes are
+Gaussian.
+
+
+
+
+
+
+
+We can create this model as follows.
+
+F = 1; W = 2; E = 3; B = 4; C = 5; D = 6; Min = 7; Mout = 8; L = 9;
+n = 9;
+
+dag = zeros(n);
+dag(F,E)=1;
+dag(W,[E Min D]) = 1;
+dag(E,D)=1;
+dag(B,[C D])=1;
+dag(D,[L Mout])=1;
+dag(Min,Mout)=1;
+
+% node sizes - all cts nodes are scalar, all discrete nodes are binary
+ns = ones(1, n);
+dnodes = [F W B];
+cnodes = mysetdiff(1:n, dnodes);
+ns(dnodes) = 2;
+
+bnet = mk_bnet(dag, ns, 'discrete', dnodes);
+
+'dnodes' is a list of the discrete nodes; 'cnodes' is the continuous
+nodes. 'mysetdiff' is a faster version of the built-in 'setdiff'.
+
+
+
+
Specifying the parameters
+
+The parameters of the discrete nodes can be specified as follows.
+
+
+'marg' is a structure that contains the fields 'mu' and 'Sigma', which
+contain the mean and (co)variance of the marginal on E.
+In this case, they are both scalars.
+Let us check they match the published figures (to 2 decimal places).
+
+
+It is easy to visualize this posterior using standard Matlab plotting
+functions, e.g.,
+
+gaussplot2d(marg.mu, marg.Sigma);
+
+produces the following picture.
+
+
+
+
+
+
+
+
+The T field indicates that the mixing weight of this Gaussian
+component is 1.0.
+If the joint contains discrete and continuous variables, the result
+will be a mixture of Gaussians, e.g.,
+
+The interpretation is
+Sigma(i,j,k) = Cov[ E(i) E(j) | F=k ].
+In this case, E is a scalar, so i=j=1; k specifies the mixture component.
+
+We saw in the sprinkler network that BNT sets the effective size of
+observed discrete nodes to 1, since they only have one legal value.
+For continuous nodes, BNT sets their length to 0,
+since they have been reduced to a point.
+For example,
+
+It is simple to post-process the output of marginal_nodes.
+For example, the file BNT/examples/static/cg1 sets the mu term of
+observed nodes to their observed value, and the Sigma term to 0 (since
+observed nodes have no variance).
+
+
+Note that the implemented version of the junction tree is numerically
+unstable when using CG potentials
+(which is why, in the example above, we only required our answers to agree with
+the published ones to 2dp.)
+This is why you might want to use stab_cond_gauss_inf_engine,
+implemented by Shan Huang. This is described in
+
+
+
"Stable Local Computation with Conditional Gaussian Distributions",
+S. Lauritzen and F. Jensen, Tech Report R-99-2014,
+Dept. Math. Sciences, Aalborg Univ., 1999.
+
+
+However, even the numerically stable version
+can be computationally intractable if there are many hidden discrete
+nodes, because the number of mixture components grows exponentially e.g., in a
+switching linear dynamical system.
+In general, one must resort to approximate inference techniques: see
+the discussion on inference engines below.
+
+
+
+
+The parameter estimation routines in BNT can be classified into 4
+types, depending on whether the goal is to compute
+a full (Bayesian) posterior over the parameters or just a point
+estimate (e.g., Maximum Likelihood or Maximum A Posteriori),
+and whether all the variables are fully observed or there is missing
+data/ hidden variables (partial observability).
+
+
+To load numeric data from an ASCII text file called 'dat.txt', where each row is a
+case and columns are separated by white-space, such as
+
+011979 1626.5 0.0
+021979 1367.0 0.0
+...
+
+you can use
+
+data = load('dat.txt');
+
+or
+
+load dat.txt -ascii
+
+In the latter case, the data is stored in a variable called 'dat' (the
+filename minus the extension).
+Alternatively, suppose the data is stored in a .csv file (has commas
+separating the columns, and contains a header line), such as
+
+header info goes here
+ORD,011979,1626.5,0.0
+DSM,021979,1367.0,0.0
+...
+
+If your file is not in either of these formats, you can either use Perl to convert
+it to this format, or use the Matlab scanf command.
+Type
+
+help iofun
+
+for more information on Matlab's file functions.
+
+
+BNT learning routines require data to be stored in a cell array.
+data{i,m} is the value of node i in case (example) m, i.e., each
+column is a case.
+If node i is not observed in case m (missing value), set
+data{i,m} = [].
+(Not all the learning routines can cope with such missing values, however.)
+In the special case that all the nodes are observed and are
+scalar-valued (as opposed to vector-valued), the data can be
+stored in a matrix (as opposed to a cell-array).
+
+Suppose, as in the mixture of experts example,
+that we have 3 nodes in the graph: X(1) is the observed input, X(3) is
+the observed output, and X(2) is a hidden (gating) node. We can
+create the dataset as follows.
+
+
+As an example, let's generate some data from the sprinkler network, randomize the parameters,
+and then try to recover the original model.
+First we create some training data using forwards sampling.
+
+samples{j,i} contains the value of the j'th node in case i.
+sample_bnet returns a cell array because, in general, each node might
+be a vector of different length.
+In this case, all nodes are discrete (and hence scalars), so we
+could have used a regular array instead (which can be quicker):
+
+data = cell2num(samples);
+
+Now we create a network with random parameters.
+(The initial values of bnet2 don't matter in this case, since we can find the
+globally optimal MLE independent of where we start.)
+
+% Make a tabula rasa
+bnet2 = mk_bnet(dag, node_sizes);
+seed = 0;
+rand('state', seed);
+bnet2.CPD{C} = tabular_CPD(bnet2, C);
+bnet2.CPD{R} = tabular_CPD(bnet2, R);
+bnet2.CPD{S} = tabular_CPD(bnet2, S);
+bnet2.CPD{W} = tabular_CPD(bnet2, W);
+
+Finally, we find the maximum likelihood estimates of the parameters.
+
+bnet3 = learn_params(bnet2, samples);
+
+To view the learned parameters, we use a little Matlab hackery.
+
+
+Currently, only tabular CPDs can have priors on their parameters.
+The conjugate prior for a multinomial is the Dirichlet.
+(For binary random variables, the multinomial is the same as the
+Bernoulli, and the Dirichlet is the same as the Beta.)
+
+The Dirichlet has a simple interpretation in terms of pseudo counts.
+If we let N_ijk = the num. times X_i=k and Pa_i=j occurs in the
+training set, where Pa_i are the parents of X_i,
+then the maximum likelihood (ML) estimate is
+T_ijk = N_ijk / N_ij (where N_ij = sum_k' N_ijk'), which will be 0 if N_ijk=0.
+To prevent us from declaring that (X_i=k, Pa_i=j) is impossible just because this
+event was not seen in the training set,
+we can pretend we saw value k of X_i, for each value j of Pa_i some number (alpha_ijk)
+of times in the past.
+The MAP (maximum a posterior) estimate is then
+
+and is never 0 if all alpha_ijk > 0.
+For example, consider the network A->B, where A is binary and B has 3
+values.
+A uniform prior for B has the form
+
+ B=1 B=2 B=3
+A=1 1 1 1
+A=2 1 1 1
+
+which can be created using
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'unif');
+
+This prior does not satisfy the likelihood equivalence principle,
+which says that Markov equivalent models
+should have the same marginal likelihood.
+A prior that does satisfy this principle is shown below.
+Heckerman (1995) calls this the
+BDeu prior (likelihood equivalent uniform Bayesian Dirichlet).
+
+ B=1 B=2 B=3
+A=1 1/6 1/6 1/6
+A=2 1/6 1/6 1/6
+
+where we put N/(q*r) in each bin; N is the equivalent sample size,
+r=|A|, q = |B|.
+This can be created as follows
+
+tabular_CPD(bnet, i, 'prior_type', 'dirichlet', 'dirichlet_type', 'BDeu');
+
+Here, 1 is the equivalent sample size, and is the strength of the
+prior.
+You can change this using
+
+bnet.CPD{i}.prior contains the new Dirichlet pseudocounts,
+and bnet.CPD{i}.CPT is set to the mean of the posterior (the
+normalized counts).
+(Hence if the initial pseudo counts are 0,
+bayes_update_params and learn_params will give the
+same result.)
+
+
+
+
+
+We can compute the same result sequentially (on-line) as follows.
+
+
+The file BNT/examples/static/StructLearn/model_select1 has an example of
+sequential model selection which uses the same idea.
+We generate data from the model A->B
+and compute the posterior prob of all 3 dags on 2 nodes:
+ (1) A B, (2) A <- B , (3) A -> B
+Models 2 and 3 are Markov equivalent, and therefore indistinguishable from
+observational data alone, so we expect their posteriors to be the same
+(assuming a prior which satisfies likelihood equivalence).
+If we use random parameters, the "true" model only gets a higher posterior after 2000 trials!
+However, if we make B a noisy NOT gate, the true model "wins" after 12
+trials, as shown below (red = model 1, blue/green (superimposed)
+represents models 2/3).
+
+
+
+The use of marginal likelihood for model selection is discussed in
+greater detail in the
+section on structure learning.
+
+
+
+
+
+samples2{i,l} is the value of node i in training case l, or [] if unobserved.
+
+Now we will compute the MLEs using the EM algorithm.
+We need to use an inference algorithm to compute the expected
+sufficient statistics in the E step; the M (maximization) step is as
+above.
+
+
+In networks with repeated structure (e.g., chains and grids), it is
+common to assume that the parameters are the same at every node. This
+is called parameter tying, and reduces the amount of data needed for
+learning.
+
+When we have tied parameters, there is no longer a one-to-one
+correspondence between nodes and CPDs.
+Rather, each CPD specifies the parameters for a whole equivalence class
+of nodes.
+It is easiest to see this by example.
+Consider the following hidden Markov
+model (HMM)
+
+
+
+
+When HMMs are used for semi-infinite processes like speech recognition,
+we assume the transition matrix
+P(H(t+1)|H(t)) is the same for all t; this is called a time-invariant
+or homogeneous Markov chain.
+Hence hidden nodes 2, 3, ..., T
+are all in the same equivalence class, say class Hclass.
+Similarly, the observation matrix P(O(t)|H(t)) is assumed to be the
+same for all t, so the observed nodes are all in the same equivalence
+class, say class Oclass.
+Finally, the prior term P(H(1)) is in a class all by itself, say class
+H1class.
+This is illustrated below, where we explicitly represent the
+parameters as random variables (dotted nodes).
+
+
+
+In BNT, we cannot represent parameters as random variables (nodes).
+Instead, we "hide" the
+parameters inside one CPD for each equivalence class,
+and then specify that the other CPDs should share these parameters, as
+follows.
+
+hnodes = 1:2:2*T;
+onodes = 2:2:2*T;
+H1class = 1; Hclass = 2; Oclass = 3;
+eclass = ones(1,N);
+eclass(hnodes(2:end)) = Hclass;
+eclass(hnodes(1)) = H1class;
+eclass(onodes) = Oclass;
+% create dag and ns in the usual way
+bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'equiv_class', eclass);
+
+Finally, we define the parameters for each equivalence class:
+
+In general, if bnet.CPD{e} = xxx_CPD(bnet, j), then j should be a
+member of e's equivalence class; that is, it is not always the case
+that e == j. You can use bnet.rep_of_eclass(e) to return the
+representative of equivalence class e.
+BNT will look up the parents of j to determine the size
+of the CPT to use. It assumes that this is the same for all members of
+the equivalence class.
+Click here for
+a more complex example of parameter tying.
+
+Note:
+Normally one would define an HMM as a
+Dynamic Bayes Net
+(see the function BNT/examples/dynamic/mk_chmm.m).
+However, one can define an HMM as a static BN using the function
+BNT/examples/static/Models/mk_hmm_bnet.m.
+
+
+
+
+
+Update (9/29/03):
+Philippe Leray is developing some additional structure learning code
+on top of BNT. Click here
+for details.
+
+
+
+There are two very different approaches to structure learning:
+constraint-based and search-and-score.
+In the constraint-based approach,
+we start with a fully connected graph, and remove edges if certain
+conditional independencies are measured in the data.
+This has the disadvantage that repeated independence tests lose
+statistical power.
+
+In the more popular search-and-score approach,
+we perform a search through the space of possible DAGs, and either
+return the best one found (a point estimate), or return a sample of the
+models found (an approximation to the Bayesian posterior).
+
+Unfortunately, the number of DAGs as a function of the number of
+nodes, G(n), is super-exponential in n.
+A closed form formula for G(n) is not known, but the first few values
+are shown below (from Cooper, 1999).
+
+
+
n
G(n)
+
1
1
+
2
3
+
3
25
+
4
543
+
5
29,281
+
6
3,781,503
+
7
1.1 x 10^9
+
8
7.8 x 10^11
+
9
1.2 x 10^15
+
10
4.2 x 10^18
+
+
+Since the number of DAGs is super-exponential in the number of nodes,
+we cannot exhaustively search the space, so we either use a local
+search algorithm (e.g., greedy hill climbing, perhaps with multiple
+restarts) or a global search algorithm (e.g., Markov Chain Monte
+Carlo).
+
+If we know a total ordering on the nodes,
+finding the best structure amounts to picking the best set of parents
+for each node independently.
+This is what the K2 algorithm does.
+If the ordering is unknown, we can search over orderings,
+which is more efficient than searching over DAGs (Koller and Friedman, 2000).
+
+In addition to the search procedure, we must specify the scoring
+function. There are two popular choices. The Bayesian score integrates
+out the parameters, i.e., it is the marginal likelihood of the model.
+The BIC (Bayesian Information Criterion) is defined as
+log P(D|theta_hat) - 0.5*d*log(N), where D is the data, theta_hat is
+the ML estimate of the parameters, d is the number of parameters, and
+N is the number of data cases.
+The BIC method has the advantage of not requiring a prior.
+
+BIC can be derived as a large sample
+approximation to the marginal likelihood.
+(It is also equal to the Minimum Description Length of a model.)
+However, in practice, the sample size does not need to be very large
+for the approximation to be good.
+For example, in the figure below, we plot the ratio between the log marginal likelihood
+and the BIC score against data-set size; we see that the ratio rapidly
+approaches 1, especially for non-informative priors.
+(This plot was generated by the file BNT/examples/static/bic1.m. It
+uses the water sprinkler BN with BDeu Dirichlet priors with different
+equivalent sample sizes.)
+
+
+
+
+
+
+
+
+As with parameter learning, handling missing data/ hidden variables is
+much harder than the fully observed case.
+The structure learning routines in BNT can therefore be classified into 4
+types, analogously to the parameter learning case.
+
+
+If two DAGs encode the same conditional independencies, they are
+called Markov equivalent. The set of all DAGs can be partitioned into
+Markov equivalence classes. Graphs within the same class can
+have
+the direction of some of their arcs reversed without changing any of
+the CI relationships.
+Each class can be represented by a PDAG
+(partially directed acyclic graph) called an essential graph or
+pattern. This specifies which edges must be oriented in a certain
+direction, and which may be reversed.
+
+
+When learning graph structure from observational data,
+the best one can hope to do is to identify the model up to Markov
+equivalence. To distinguish amongst graphs within the same equivalence
+class, one needs interventional data: see the discussion on active learning below.
+
+
+
+
+
+The brute-force approach to structure learning is to enumerate all
+possible DAGs, and score each one. This provides a "gold standard"
+with which to compare other algorithms. We can do this as follows.
+
+where data(i,m) is the value of node i in case m,
+and ns(i) is the size of node i.
+If the DAGs have a lot of families in common, we can cache the sufficient statistics,
+making this potentially more efficient than scoring the DAGs one at a time.
+(Caching is not currently implemented, however.)
+
+By default, we use the Bayesian scoring metric, and assume CPDs are
+represented by tables with BDeu(1) priors.
+We can override these defaults as follows.
+If we want to use uniform priors, we can say
+
+params{i} is a cell-array, containing optional arguments that are
+passed to the constructor for CPD i.
+
+Now suppose we want to use different node types, e.g.,
+Suppose nodes 1 and 2 are Gaussian, and nodes 3 and 4 softmax (both
+these CPDs can support discrete and continuous parents, which is
+necessary since all other nodes will be considered as parents).
+The Bayesian scoring metric currently only works for tabular CPDs, so
+we will use BIC:
+
+In practice, one can't enumerate all possible DAGs for N > 5,
+but one can evaluate any reasonably-sized set of hypotheses in this
+way (e.g., nearest neighbors of your current best guess).
+Think of this as "computer assisted model refinement" as opposed to de
+novo learning.
+
+
+
+
+The K2 algorithm (Cooper and Herskovits, 1992) is a greedy search algorithm that works as follows.
+Initially each node has no parents. It then adds incrementally that parent whose addition most
+increases the score of the resulting structure. When the addition of no single
+parent can increase the score, it stops adding parents to the node.
+Since we are using a fixed ordering, we do not need to check for
+cycles, and can choose the parents for each node independently.
+
+The original paper used the Bayesian scoring
+metric with tabular CPDs and Dirichlet priors.
+BNT generalizes this to allow any kind of CPD, and either the Bayesian
+scoring metric or BIC, as in the example above.
+In addition, you can specify
+an optional upper bound on the number of parents for each node.
+The file BNT/examples/static/k2demo1.m gives an example of how to use K2.
+We use the water sprinkler network and sample 100 cases from it as before.
+Then we see how much data it takes to recover the generating structure:
+
+So we see it takes about sz(10)=50 cases. (BIC behaves similarly,
+showing that the prior doesn't matter too much.)
+In general, we cannot hope to recover the "true" generating structure,
+only one that is in its Markov equivalence
+class.
+
+
+
+
+Hill-climbing starts at a specific point in space,
+considers all nearest neighbors, and moves to the neighbor
+that has the highest score; if no neighbors have higher
+score than the current point (i.e., we have reached a local maximum),
+the algorithm stops. One can then restart in another part of the space.
+
+A common definition of "neighbor" is all graphs that can be
+generated from the current graph by adding, deleting or reversing a
+single arc, subject to the acyclicity constraint.
+Other neighborhoods are possible: see
+
+Optimal Structure Identification with Greedy Search, Max
+Chickering, JMLR 2002.
+
+
+
+
+
+
+We can use a Markov Chain Monte Carlo (MCMC) algorithm called
+Metropolis-Hastings (MH) to search the space of all
+DAGs.
+The standard proposal distribution is to consider moving to all
+nearest neighbors in the sense defined above.
+
+The function can be called
+as in the following example.
+
+We can also plot the acceptance ratio versus number of MCMC steps,
+as a crude convergence diagnostic.
+
+clf
+plot(accept_ratio)
+
+
+
+Even though the number of samples needed by MCMC is theoretically
+polynomial (not exponential) in the dimensionality of the search space, in practice it has been
+found that MCMC does not converge in reasonable time for graphs with
+more than about 10 nodes.
+
+
+
+
+
+
+As was mentioned above,
+one can only learn a DAG up to Markov equivalence, even given infinite data.
+If one is interested in learning the structure of a causal network,
+one needs interventional data.
+(By "intervention" we mean forcing a node to take on a specific value,
+thereby effectively severing its incoming arcs.)
+
+Most of the scoring functions accept an optional argument
+that specifies whether a node was observed to have a certain value, or
+was forced to have that value: we set clamped(i,m)=1 if node i was
+forced in training case m. e.g., see the file
+BNT/examples/static/cooper_yoo.
+
+An interesting question is to decide which interventions to perform
+(c.f., design of experiments). For details, see the following tech
+report
+
+
+Computing the Bayesian score when there is partial observability is
+computationally challenging, because the parameter posterior becomes
+multimodal (the hidden nodes induce a mixture distribution).
+One therefore needs to use approximations such as BIC.
+Unfortunately, search algorithms are still expensive, because we need
+to run EM at each step to compute the MLE, which is needed to compute
+the score of each model. An alternative approach is
+to do the local search steps inside of the M step of EM, which is more
+efficient since the data has been "filled in" - this is
+called the structural EM algorithm (Friedman 1997), and provably
+converges to a local maximum of the BIC score.
+
+Wei Hu has implemented SEM for discrete nodes.
+You can download his package from
+here.
+Please address all questions about this code to
+wei.hu@intel.com.
+See also Phl's implementation of SEM.
+
+
+
+
+
+
+You can visualize an arbitrary graph (such as one learned using the
+structure learning routines) with Matlab code contributed by
+Ali
+Taylan Cemgil
+from the University of Nijmegen.
+For static BNs, call it as follows:
+
+If you install the excellent graphviz, an
+open-source graph visualization package from AT&T,
+you can create a much better visualization as follows
+
+graph_to_dot(bnet.dag)
+
+This works by converting the adjacency matrix to a file suitable
+for input to graphviz (using the dot format),
+then converting the output of graphviz to postscript, and displaying the results using
+ghostview.
+You can do each of these steps separately for more control, as shown
+below.
+
+
+The IC algorithm (Pearl and Verma, 1991),
+and the faster, but otherwise equivalent, PC algorithm (Spirtes, Glymour, and Scheines 1993),
+computes many conditional independence tests,
+and combines these constraints into a
+PDAG to represent the whole
+Markov equivalence class.
+
+IC*/FCI extend IC/PC to handle latent variables: see below.
+(IC stands for inductive causation; PC stands for Peter and Clark,
+the first names of Spirtes and Glymour; FCI stands for fast causal
+inference.
+What we, following Pearl (2000), call IC* was called
+IC in the original Pearl and Verma paper.)
+For details, see
+
+
+The PC algorithm takes as arguments a function f, the number of nodes N,
+the maximum fan in K, and additional arguments A which are passed to f.
+The function f(X,Y,S,A) returns 1 if X is conditionally independent of Y given S, and 0
+otherwise.
+For example, suppose we cheat by
+passing in a CI "oracle" which has access to the true DAG; the oracle
+tests for d-separation in this DAG, i.e.,
+f(X,Y,S) calls dsep(X,Y,S,dag). We can do this as follows.
+
+pdag = learn_struct_pdag_pc('dsep', N, max_fan_in, dag);
+
+pdag(i,j) = -1 if there is definitely an i->j arc,
+and pdag(i,j) = 1 if there is either an i->j or an i<-j arc.
+
+So as expected, we see that the V-structure at the W node is uniquely identified,
+but the other arcs have ambiguous orientation.
+
+We now give an example from p141 (1st edn) / p103 (2nd edn) of the SGS
+book.
+This example concerns the female orgasm.
+We are given a correlation matrix C between 7 measured factors (such
+as subjective experiences of coital and masturbatory experiences),
+derived from 281 samples, and want to learn a causal model of the
+data. We will not discuss the merits of this type of work here, but
+merely show how to reproduce the results in the SGS book.
+Their program,
+Tetrad,
+makes use of the Fisher Z-test for conditional
+independence, so we do the same:
+
+The results match those of Fig 12a of SGS apart from two edge
+differences; presumably this is due to rounding error (although it
+could be a bug, either in BNT or in Tetrad).
+This example can be found in the file BNT/examples/static/pc2.m.
+
+
+
+The IC* algorithm (Pearl and Verma, 1991),
+and the faster FCI algorithm (Spirtes, Glymour, and Scheines 1993),
+are like the IC/PC algorithm, except that they can detect the presence
+of latent variables.
+See the file learn_struct_pdag_ic_star written by Tamar
+Kushnir. The output is a matrix P, defined as follows
+(see Pearl (2000), p52 for details):
+
+% P(i,j) = -1 if there is either a latent variable L such that i <-L->j OR there is a directed edge from i->j.
+% P(i,j) = -2 if there is a marked directed i-*>j edge.
+% P(i,j) = P(j,i) = 1 if there is an undirected edge i--j
+% P(i,j) = P(j,i) = 2 if there is a latent variable L such that i<-L->j.
+
+
+Up until now, we have used the junction tree algorithm for inference.
+However, sometimes this is too slow, or not even applicable.
+In general, there are many inference algorithms each of which make
+different tradeoffs between speed, accuracy, complexity and
+generality. Furthermore, there might be many implementations of the
+same algorithm; for instance, a general purpose, readable version,
+and a highly-optimized, specialized one.
+To cope with this variety, we treat each inference algorithm as an
+object, which we call an inference engine.
+
+
+An inference engine is an object that contains a bnet and supports the
+'enter_evidence' and 'marginal_nodes' methods. The engine constructor
+takes the bnet as argument and may do some model-specific processing.
+When 'enter_evidence' is called, the engine may do some
+evidence-specific processing. Finally, when 'marginal_nodes' is
+called, the engine may do some query-specific processing.
+
+
+The amount of work done when each stage is specified -- structure,
+parameters, evidence, and query -- depends on the engine. The cost of
+work done early in this sequence can be amortized. On the other hand,
+one can make better optimizations if one waits until later in the
+sequence.
+For example, the parameters might imply
+conditional independencies that are not evident in the graph structure,
+but can nevertheless be exploited; the evidence indicates which nodes
+are observed and hence can effectively be disconnected from the
+graph; and the query might indicate that large parts of the network
+are d-separated from the query nodes. (Since it is not the actual
+values of the evidence that matters, just which nodes are observed,
+many engines allow you to specify which nodes will be observed when they are constructed,
+i.e., before calling 'enter_evidence'. Some engines can still cope if
+the actual pattern of evidence is different, e.g., if there is missing
+data.)
+
+
+Although being maximally lazy (i.e., only doing work when a query is
+issued) may seem desirable,
+this is not always the most efficient.
+For example,
+when learning using EM, we need to call marginal_nodes N times, where N is the
+number of nodes. Variable elimination would end
+up repeating a lot of work
+each time marginal_nodes is called, making it inefficient for
+learning. The junction tree algorithm, by contrast, uses dynamic
+programming to avoid this redundant computation --- it calculates all
+marginals in two passes during 'enter_evidence', so calling
+'marginal_nodes' takes constant time.
+
+We will discuss some of the inference algorithms implemented in BNT
+below, and finish with a summary of all
+of them.
+
+
+
+
+
+
+
+
+
+The variable elimination algorithm, also known as bucket elimination
+or peeling, is one of the simplest inference algorithms.
+The basic idea is to "push sums inside of products"; this is explained
+in more detail
+here.
+
+The principle of distributing sums over products can be generalized
+greatly to apply to any commutative semiring.
+This forms the basis of many common algorithms, such as Viterbi
+decoding and the Fast Fourier Transform. For details, see
+
+
+
R. McEliece and S. M. Aji, 2000.
+
+
+The Generalized Distributive Law,
+IEEE Trans. Inform. Theory, vol. 46, no. 2 (March 2000),
+pp. 325--343.
+
+
+
+Choosing an order in which to sum out the variables so as to minimize
+computational cost is known to be NP-hard.
+The implementation of this algorithm in
+var_elim_inf_engine makes no attempt to optimize this
+ordering (in contrast, say, to jtree_inf_engine, which uses a
+greedy search procedure to find a good ordering).
+
+Note: unlike most algorithms, var_elim does all its computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+
+
+The simplest inference algorithm of all is to explicitly construct
+the joint distribution over all the nodes, and then to marginalize it.
+This is implemented in global_joint_inf_engine.
+Since the size of the joint is exponential in the
+number of discrete (hidden) nodes, this is not a very practical algorithm.
+It is included merely for pedagogical and debugging purposes.
+
+Three specialized versions of this algorithm have also been implemented,
+corresponding to the cases where all the nodes are discrete (D), all
+are Gaussian (G), and some are discrete and some Gaussian (CG).
+They are called enumerative_inf_engine,
+gaussian_inf_engine,
+and cond_gauss_inf_engine respectively.
+
+Note: unlike most algorithms, these global inference algorithms do all their computational work
+inside of marginal_nodes, not inside of
+enter_evidence.
+
+
+
+
+The junction tree algorithm is quite slow on the QMR network,
+since the cliques are so big.
+One simple trick we can use is to notice that hidden leaves do not
+affect the posteriors on the roots, and hence do not need to be
+included in the network.
+A second trick is to notice that the negative findings can be
+"absorbed" into the prior:
+see the file
+BNT/examples/static/mk_minimal_qmr_bnet for details.
+
+
+A much more significant speedup is obtained by exploiting special
+properties of the noisy-or node, as done by the quickscore
+algorithm. For details, see
+
+
Heckerman, "A tractable inference algorithm for diagnosing multiple diseases", UAI 89.
+
Rish and Dechter, "On the impact of causal independence", UCI
+tech report, 1998.
+
+
+This has been implemented in BNT as a special-purpose inference
+engine, which can be created and used as follows:
+
+
+Even using quickscore, exact inference takes time that is exponential
+in the number of positive findings.
+Hence for large networks we need to resort to approximate inference techniques.
+See for example
+
+
T. Jaakkola and M. Jordan, "Variational probabilistic inference and the
+QMR-DT network", JAIR 10, 1999.
+
+
K. Murphy, Y. Weiss and M. Jordan, "Loopy belief propagation for approximate inference: an empirical study",
+ UAI 99.
+
+The latter approximation
+entails applying Pearl's belief propagation algorithm to a model even
+if it has loops (hence the name loopy belief propagation).
+Pearl's algorithm, implemented as pearl_inf_engine, gives
+exact results when applied to singly-connected graphs
+(a.k.a. polytrees, since
+the underlying undirected topology is a tree, but a node may have
+multiple parents).
+To apply this algorithm to a graph with loops,
+use pearl_inf_engine.
+This can use a centralized or distributed message passing protocol.
+You can use it as in the following example.
+
+We found that this algorithm often converges, and when it does, often
+is very accurate, but it depends on the precise setting of the
+parameter values of the network.
+(See the file BNT/examples/static/qmr1 to repeat the experiment for yourself.)
+Understanding when and why belief propagation converges/works
+is a topic of ongoing research.
+
+pearl_inf_engine can exploit special structure in noisy-or
+and gmux nodes to compute messages efficiently.
+
+belprop_inf_engine is like pearl, but uses potentials to
+represent messages. Hence this is slower.
+
+belprop_fg_inf_engine is like belprop,
+but is designed for factor graphs.
+
+
+
+
+
+BNT now (Mar '02) has two sampling (Monte Carlo) inference algorithms:
+
+
likelihood_weighting_inf_engine which does importance
+sampling and can handle any node type.
+
gibbs_sampling_inf_engine, written by Bhaskara Marthi.
+Currently this can only handle tabular CPDs.
+For a much faster and more powerful Gibbs sampling program, see
+BUGS.
+
+Note: To generate samples from a network (which is not the same as inference!),
+use sample_bnet.
+
+
+
+
+
+
+The inference engines differ in many ways. Here are
+some of the major "axes":
+
+
Works for all topologies or makes restrictions?
+
Works for all node types or makes restrictions?
+
Exact or approximate inference?
+
+
+
+In terms of topology, most engines handle any kind of DAG.
+belprop_fg does approximate inference on factor graphs (FG), which
+can be used to represent directed, undirected, and mixed (chain)
+graphs.
+(In the future, we plan to support exact inference on chain graphs.)
+quickscore only works on QMR-like models.
+
+In terms of node types: algorithms that use potentials can handle
+discrete (D), Gaussian (G) or conditional Gaussian (CG) models.
+Sampling algorithms can essentially handle any kind of node (distribution).
+Other algorithms make more restrictive assumptions in exchange for
+speed.
+
+Finally, most algorithms are designed to give the exact answer.
+The belief propagation algorithms are exact if applied to trees, and
+in some other cases.
+Sampling is considered approximate, even though, in the limit of an
+infinite number of samples, it gives the exact answer.
+
+
+
+Here is a summary of the properties
+of all the engines in BNT which work on static networks.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/confmat.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/confmat.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+
+
+
+Netlab Reference Manual confmat
+
+
+
+
confmat
+
+
+Purpose
+
+Compute a confusion matrix.
+
+
+Synopsis
+
+
+[C, rate] = confmat(y, t)
+
+
+
+Description
+
+[C, rate] = confmat(y, t) computes the confusion matrix C
+and classification performance rate for the predictions y
+compared with the targets t. The data is assumed to be in a
+1-of-N encoding, unless there is just one column, when it is assumed to
+be a 2 class problem with a 0-1 encoding. Each row of y and t
+corresponds to a single example.
+
+
In the confusion matrix, the rows represent the true classes and the
+columns the predicted classes. The vector rate has two entries:
+the percentage of correct classifications and the total number of
+correct classifications.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/conjgrad.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/conjgrad.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,101 @@
+
+
+
+Netlab Reference Manual conjgrad
+
+
+
+
conjgrad
+
+
+Purpose
+
+Conjugate gradients optimization.
+
+
+Description
+
+[x, options, flog, pointlog] = conjgrad(f, x, options, gradf) uses a
+conjugate gradients
+algorithm to find the minimum of the function f(x) whose
+gradient is given by gradf(x). Here x is a row vector
+and f returns a scalar value.
+The point at which f has a local minimum
+is returned as x. The function value at that point is returned
+in options(8). A log of the function values
+after each cycle is (optionally) returned in flog, and a log
+of the points visited is (optionally) returned in pointlog.
+
+
conjgrad(f, x, options, gradf, p1, p2, ...) allows
+additional arguments to be passed to f() and gradf().
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog, and the points visited
+in the return argument pointslog. If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is a measure of the absolute precision required for the value
+of x at the solution. If the absolute difference between
+the values of x between two successive steps is less than
+options(2), then this condition is satisfied.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. If the absolute difference between the
+objective function values between two successive steps is less than
+options(3), then this condition is satisfied.
+Both this and the previous condition must be
+satisfied for termination.
+
+
options(9) is set to 1 to check the user defined gradient function.
+
+
options(10) returns the total number of function evaluations (including
+those in any line searches).
+
+
options(11) returns the total number of gradient evaluations.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
options(15) is the precision in parameter space of the line search;
+default 1e-4.
+
+
+Examples
+
+An example of
+the use of the additional arguments is the minimization of an error
+function for a neural network:
+
+
+The conjugate gradients algorithm constructs search
+directions di that are conjugate: i.e. di*H*d(i-1) = 0,
+where H is the Hessian matrix. This means that minimising along
+di does not undo the effect of minimising along the previous
+direction. The Polak-Ribiere formula is used to calculate new search
+directions. The Hessian is not calculated, so there is only an
+O(W) storage requirement (where W is the number of
+parameters). However, relatively accurate line searches must be used
+(default is 1e-04).
+
+
errstring = consist(net, type, inputs) takes a network
+data structure net together with a string type containing
+the correct network type, a matrix inputs of input vectors and checks
+that the data structure is consistent with the other arguments. An empty
+string is returned if there is no error, otherwise the string contains the
+relevant error message. If the type string is empty, then any
+type of network is allowed.
+
+
errstring = consist(net, type) takes a network data structure
+net together with a string type containing the correct
+network type, and checks that the two types match.
+
+
errstring = consist(net, type, inputs, outputs) also checks that the
+network has the correct number of outputs, and that the number of patterns
+in the inputs and outputs is the same. The fields in net
+that are used are
+
+ type
+ nin
+ nout
+
+
+
+
+Example
+
+
+
mlpfwd, the function that propagates values forward through an MLP
+network, has the following check at the head of the file:
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/convertoldnet.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/convertoldnet.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual convertoldnet
+
+
+
+
convertoldnet
+
+
+Purpose
+
+Convert pre-2.3 release MLP and MDN nets to new format
+
+
+Synopsis
+
+
+net = convertoldnet(net)
+
+
+
+
+Description
+
+net = convertoldnet(net) takes a network net and, if appropriate,
+converts it from pre-2.3 to the current format. The difference is simply
+that in MLPs and the MLP sub-net of MDNs the field actfn has been
+renamed outfn to make it consistent with GLM and RBF networks.
+If the network is not old-format or an MLP or MDN it is left unchanged.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/datread.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/datread.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+
+
+
+Netlab Reference Manual datread
+
+
+
+
datread
+
+
+Purpose
+
+Read data from an ascii file.
+
+
+Synopsis
+
+
+[x, t, nin, nout, ndata] = datread(filename)
+
+
+
+
+Description
+
+
+
[x, t, nin, nout, ndata] = datread(filename) reads from
+the file filename and returns a matrix x of input vectors,
+a matrix t of target vectors, and integers nin, nout
+and ndata specifying the number of inputs, the number of outputs
+and the number of data points respectively.
+
+
The format of the data file is as follows: the first row contains the
+string nin followed by the number of inputs, the second row
+contains the string nout followed by the number of outputs, and
+the third row contains the string ndata followed by the number
+of data vectors. Subsequent lines each contain one input vector
+followed by one output vector, with individual values separated by
+spaces.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/datwrite.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/datwrite.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+
+
+
+Netlab Reference Manual datwrite
+
+
+
+
datwrite
+
+
+Purpose
+
+Write data to ascii file.
+
+
+Synopsis
+
+
+datwrite(filename, x, t)
+
+
+
+
+Description
+
+
+
datwrite(filename, x, t) takes a matrix x of input vectors
+and a matrix t of target vectors and writes them to an ascii
+file named filename. The file format is as follows: the first
+row contains the string nin followed by the number of inputs,
+the second row contains the string nout followed by the number
+of outputs, and the third row contains the string ndata followed
+by the number of data vectors. Subsequent lines each contain one input
+vector followed by one output vector, with individual values separated
+by spaces.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/dem2ddat.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/dem2ddat.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+
+
+
+Netlab Reference Manual dem2ddat
+
+
+
+
dem2ddat
+
+
+Purpose
+
+Generates two dimensional data for demos.
+
+
+Synopsis
+
+
+data = dem2ddat(ndata)
+
+
+[data, c] = dem2ddat(ndata)
+
+
+
+Description
+
+The data is
+drawn from three spherical Gaussian distributions with priors 0.3,
+0.5 and 0.2; centres (2, 3.5), (0, 0) and (0,2); and standard deviations
+0.2, 0.5 and 1.0. data = dem2ddat(ndata) generates ndata
+points.
+
+
[data, c] = dem2ddat(ndata) also returns a matrix containing the
+centres of the Gaussian distributions.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demard.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demard.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+
+
+
+Netlab Reference Manual demard
+
+
+
+
demard
+
+
+Purpose
+
+Automatic relevance determination using the MLP.
+
+
+Synopsis
+
+
+demmlp1
+
+
+
+Description
+
+This script demonstrates the technique of automatic relevance
+determination (ARD) using a synthetic problem having three input
+variables: x1 is sampled uniformly from the range (0,1) and has
+a low level of added Gaussian noise, x2 is a copy of x1
+with a higher level of added noise, and x3 is sampled randomly
+from a Gaussian distribution. The single target variable is determined
+by sin(2*pi*x1) with additive Gaussian noise. Thus x1 is
+very relevant for determining the target value, x2 is of some
+relevance, while x3 is irrelevant. The prior over weights is
+given by the ARD Gaussian prior with a separate hyper-parameter for
+the group of weights associated with each input. A multi-layer
+perceptron is trained on this data, with re-estimation of the
+hyper-parameters using evidence. The final values for the
+hyper-parameters reflect the relative importance of the three inputs.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demev1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demev1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual demev1
+
+
+
+
demev1
+
+
+Purpose
+
+Demonstrate Bayesian regression for the MLP.
+
+
+Synopsis
+
+
+demev1
+
+
+
+Description
+
+The problem consists of an input variable x which is sampled from a
+Gaussian distribution, and a target variable t generated by
+computing sin(2*pi*x) and adding Gaussian noise. A 2-layer
+network with linear outputs is trained by minimizing a sum-of-squares
+error function with isotropic Gaussian regularizer, using the scaled
+conjugate gradient optimizer. The hyperparameters alpha and
+beta are re-estimated using the function evidence. A graph
+is plotted of the original function, the training data, the trained
+network function, and the error bars.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demev2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demev2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+
+
+
+Netlab Reference Manual demev2
+
+
+
+
demev2
+
+
+Purpose
+
+Demonstrate Bayesian classification for the MLP.
+
+
+Synopsis
+
+
+demev2
+
+
+
+Description
+
+A synthetic two class two-dimensional dataset x is sampled
+from a mixture of four Gaussians. Each class is
+associated with two of the Gaussians so that the optimal decision
+boundary is non-linear.
+A 2-layer
+network with logistic outputs is trained by minimizing the cross-entropy
+error function with isotropic Gaussian regularizer (one hyperparameter for
+each of the four standard weight groups), using the scaled
+conjugate gradient optimizer. The hyperparameter vectors alpha and
+beta are re-estimated using the function evidence. A graph
+is plotted of the optimal, regularised, and unregularised decision
+boundaries. A further plot of the moderated versus unmoderated contours
+is generated.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demev3.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demev3.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual demev3
+
+
+
+
demev3
+
+
+Purpose
+
+Demonstrate Bayesian regression for the RBF.
+
+
+Synopsis
+
+
+demev3
+
+
+
+Description
+
+The problem consists of an input variable x which is sampled from a
+Gaussian distribution, and a target variable t generated by
+computing sin(2*pi*x) and adding Gaussian noise. An RBF
+network with linear outputs is trained by minimizing a sum-of-squares
+error function with isotropic Gaussian regularizer, using the scaled
+conjugate gradient optimizer. The hyperparameters alpha and
+beta are re-estimated using the function evidence. A graph
+is plotted of the original function, the training data, the trained
+network function, and the error bars.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgauss.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgauss.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+
+
+
+Netlab Reference Manual demgauss
+
+
+
+
demgauss
+
+
+Purpose
+
+Demonstrate sampling from Gaussian distributions.
+
+
+Synopsis
+
+
+demgauss
+
+
+
+
+Description
+
+
+
demgauss provides a simple illustration of the generation of
+data from Gaussian distributions. It first samples from a
+one-dimensional distribution using randn, and then plots a
+normalized histogram estimate of the distribution using histp
+together with the true density calculated using gauss.
+
+
demgauss then demonstrates sampling from a Gaussian distribution
+in two dimensions. It creates a mean vector and a covariance matrix,
+and then plots contours of constant density using the function
+gauss. A sample of points drawn from this distribution, obtained
+using the function gsamp, is then superimposed on the contours.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demglm1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demglm1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demglm1
+
+
+
+
demglm1
+
+
+Purpose
+
+Demonstrate simple classification using a generalized linear model.
+
+
+Synopsis
+
+
+demglm1
+
+
+
+Description
+
+
+The problem consists of a two dimensional input
+matrix data and a vector of classifications t. The data is
+generated from two Gaussian clusters, and a generalized linear model
+with logistic output is trained using iterative reweighted least squares.
+A plot of the data together with the 0.1, 0.5 and 0.9 contour lines
+of the conditional probability is generated.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demglm2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demglm2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demglm2
+
+
+
+
demglm2
+
+
+Purpose
+
+Demonstrate simple classification using a generalized linear model.
+
+
+Synopsis
+
+
+demglm1
+
+
+
+Description
+
+
+The problem consists of a two dimensional input
+matrix data and a vector of classifications t. The data is
+generated from three Gaussian clusters, and a generalized linear model
+with softmax output is trained using iterative reweighted least squares.
+A plot of the data together with regions shaded by the classification
+given by the network is generated.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+
+
+
+Netlab Reference Manual demgmm1
+
+
+
+
demgmm1
+
+
+Purpose
+
+Demonstrate EM for Gaussian mixtures.
+
+
+Synopsis
+
+
+demgmm1
+
+
+
+Description
+
+This script demonstrates the use of the EM algorithm to fit a mixture
+of Gaussians to a set of data using maximum likelihood. A colour
+coding scheme is used to illustrate the evaluation of the posterior
+probabilities in the E-step of the EM algorithm.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+
+
+
+Netlab Reference Manual demgmm1
+
+
+
+
demgmm1
+
+
+Purpose
+
+Demonstrate density modelling with a Gaussian mixture model.
+
+
+Synopsis
+
+
+demgmm1
+
+
+
+Description
+
+The problem consists of modelling data generated by a mixture of three
+Gaussians in 2 dimensions. The priors are 0.3,
+0.5 and 0.2; the centres are (2, 3.5), (0, 0) and (0,2); the variances
+are 0.2, 0.5 and 1.0. The first figure contains a
+ scatter plot of the data.
+
+
A Gaussian mixture model with three components is trained using EM. The
+parameter vector is printed before training and after training. The user
+should press any key to continue at these points. The parameter vector
+consists of priors (the first column), centres (given as (x, y) pairs as
+the next two columns), and variances (the last column).
+
+
The second figure is a 3 dimensional view of the density function, while
+the third shows the 1-standard deviation circles for the three components of
+the mixture model.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm3.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm3.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+
+
+
+Netlab Reference Manual demgmm3
+
+
+
+
demgmm3
+
+
+Purpose
+
+Demonstrate density modelling with a Gaussian mixture model.
+
+
+Synopsis
+
+
+demgmm3
+
+
+
+Description
+
+
+The problem consists of modelling data generated
+by a mixture of three Gaussians in 2 dimensions with a mixture model
+using diagonal covariance matrices. The priors are 0.3, 0.5 and 0.2; the
+centres are (2, 3.5), (0, 0) and (0,2); the covariances are all axis aligned
+(0.16, 0.64), (0.25, 1) and the identity
+matrix. The first figure contains a scatter plot of the data.
+
+
A Gaussian mixture model with three components is trained using EM. The
+parameter vector is printed before training and after training. The user
+should press any key to continue at these points. The parameter vector
+consists of priors (the first column), and centres (given as (x, y) pairs as
+the next two columns). The diagonal entries of the
+covariance matrices are printed separately.
+
+
The second figure is a 3 dimensional view of the density function,
+while the third shows the axes of the 1-standard deviation circles
+for the three components of the mixture model.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm4.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm4.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,54 @@
+
+
+
+Netlab Reference Manual demgmm4
+
+
+
+
demgmm4
+
+
+Purpose
+
+Demonstrate density modelling with a Gaussian mixture model.
+
+
+Synopsis
+
+
+demgmm4
+
+
+
+Description
+
+
+The problem consists of modelling data generated
+by a mixture of three Gaussians in 2 dimensions with a mixture model
+using full covariance matrices. The priors are 0.3, 0.5 and 0.2; the
+centres are (2, 3.5), (0, 0) and (0,2); the variances are (0.16, 0.64)
+axis aligned, (0.25, 1) rotated by 30 degrees and the identity
+matrix. The first figure contains a scatter plot of the data.
+
+
A Gaussian mixture model with three components is trained using EM. The
+parameter vector is printed before training and after training. The user
+should press any key to continue at these points. The parameter vector
+consists of priors (the first column), and centres (given as (x, y) pairs as
+the next two columns). The covariance matrices are printed separately.
+
+
The second figure is a 3 dimensional view of the density function,
+while the third shows the axes of the 1-standard deviation ellipses
+for the three components of the mixture model.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm5.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgmm5.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+
+
+
+Netlab Reference Manual demgmm5
+
+
+
+
demgmm5
+
+
+Purpose
+
+Demonstrate density modelling with a PPCA mixture model.
+
+
+Synopsis
+
+
+demgmm5
+
+
+
+Description
+
+
+The problem consists of modelling data generated
+by a mixture of three Gaussians in 2 dimensions with a mixture model
+using full covariance matrices. The priors are 0.3, 0.5 and 0.2; the
+centres are (2, 3.5), (0, 0) and (0,2); the variances are (0.16, 0.64)
+axis aligned, (0.25, 1) rotated by 30 degrees and the identity
+matrix. The first figure contains a scatter plot of the data.
+
+
A mixture model with three one-dimensional PPCA components is trained
+using EM. The parameter vector is printed before training and after
+training. The parameter vector consists of priors (the first column), and
+centres (given as (x, y) pairs as the next two columns).
+
+
The second figure is a 3 dimensional view of the density function,
+while the third shows the axes of the 1-standard deviation ellipses
+for the three components of the mixture model together with the one
+standard deviation along the principal component of each mixture
+model component.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgp.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgp.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demgp
+
+
+
+
demgp
+
+
+Purpose
+
+Demonstrate simple regression using a Gaussian Process.
+
+
+Synopsis
+
+
+demgp
+
+
+
+Description
+
+The problem consists of one input variable x and one target variable
+t. The values in x are chosen in two separated clusters and the
+target data is generated by computing sin(2*pi*x) and adding Gaussian
+noise. Two Gaussian Processes, each with different covariance functions
+are trained by optimising the hyperparameters
+using the scaled conjugate gradient algorithm. The final predictions are
+plotted together with 2 standard deviation error bars.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgpard.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgpard.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+
+
+
+Netlab Reference Manual demgpard
+
+
+
+
demgpard
+
+
+Purpose
+
+Demonstrate ARD using a Gaussian Process.
+
+
+Synopsis
+
+
+demgpard
+
+
+
+Description
+
+The data consists of three input variables x1, x2 and
+x3, and one target variable
+t. The
+target data is generated by computing sin(2*pi*x1) and adding Gaussian
+noise, x2 is a copy of x1 with a higher level of added
+noise, and x3 is sampled randomly from a Gaussian distribution.
+A Gaussian Process is
+trained by optimising the hyperparameters
+using the scaled conjugate gradient algorithm. The final values of the
+hyperparameters show that the model successfully identifies the importance
+of each input.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgpot.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgpot.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demgpot
+
+
+
+
demgpot
+
+
+Purpose
+
+Computes the gradient of the negative log likelihood for a mixture model.
+
+
+Synopsis
+
+
+g = demgpot(x, mix)
+
+
+
+Description
+
+This function computes the gradient of the negative log of the unconditional data
+density p(x) with respect to the coefficients of the
+data vector x for a Gaussian mixture model. The data structure
+mix defines the mixture model, while the matrix x contains
+the data vector as a row vector. Note the unusual order of the arguments:
+this is so that the function can be used in demhmc1 directly for
+sampling from the distribution p(x).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgtm1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgtm1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual demgtm1
+
+
+
+
demgtm1
+
+
+Purpose
+
+Demonstrate EM for GTM.
+
+
+Synopsis
+
+
+demgtm1
+
+
+
+Description
+
+
+This script demonstrates the use of the EM
+algorithm to fit a one-dimensional GTM to a two-dimensional set of data
+using maximum likelihood. The location and spread of the Gaussian kernels
+in the data space is shown during training.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demgtm2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demgtm2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual demgtm2
+
+
+
+
demgtm2
+
+
+Purpose
+
+Demonstrate GTM for visualisation.
+
+
+Synopsis
+
+
+demgtm2
+
+
+
+Description
+
+
+This script demonstrates the use of a
+GTM with a two-dimensional latent space to visualise data in a higher
+dimensional space.
+This is done through the use of the mean responsibility and magnification
+factors.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demhint.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demhint.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual demhint
+
+
+
+
demhint
+
+
+Purpose
+
+Demonstration of Hinton diagram for 2-layer feed-forward network.
+
+
+Synopsis
+
+
+demhint
+demhint(nin, nhidden, nout)
+
+
+
+Description
+
+
+
demhint plots a Hinton diagram for a 2-layer feedforward network
+with 5 inputs, 4 hidden units and 3 outputs. The weight vector is
+chosen from a Gaussian distribution as described under mlp.
+
+
demhint(nin, nhidden, nout) allows the user to specify the
+number of inputs, hidden units and outputs.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demhmc1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demhmc1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+
+
+
+Netlab Reference Manual demhmc1
+
+
+
+
demhmc1
+
+
+Purpose
+
+Demonstrate Hybrid Monte Carlo sampling on mixture of two Gaussians.
+
+
+Synopsis
+
+
+demhmc1
+
+
+
+Description
+
+The problem consists of generating data from a mixture of two Gaussians
+in two dimensions using a hybrid Monte Carlo algorithm with persistence.
+A mixture model is then fitted to the sample to compare it with the
+true underlying generator.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demhmc2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demhmc2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual demhmc2
+
+
+
+
demhmc2
+
+
+Purpose
+
+Demonstrate Bayesian regression with Hybrid Monte Carlo sampling.
+
+
+Synopsis
+
+
+demhmc2
+
+
+
+Description
+
+The problem consists of one input variable x and one target variable
+t with data generated by sampling x at equal intervals and then
+generating target data by computing sin(2*pi*x) and adding Gaussian
+noise. The model is a 2-layer network with linear outputs, and the hybrid Monte
+Carlo algorithm (without persistence) is used to sample from the posterior
+distribution of the weights. The graph shows the underlying function,
+100 samples from the function given by the posterior distribution of the
+weights, and the average prediction (weighted by the posterior probabilities).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demhmc3.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demhmc3.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual demhmc3
+
+
+
+
demhmc3
+
+
+Purpose
+
+Demonstrate Bayesian regression with Hybrid Monte Carlo sampling.
+
+
+Synopsis
+
+
+demhmc3
+
+
+
+Description
+
+The problem consists of one input variable x and one target variable
+t with data generated by sampling x at equal intervals and then
+generating target data by computing sin(2*pi*x) and adding Gaussian
+noise. The model is a 2-layer network with linear outputs, and the hybrid Monte
+Carlo algorithm (with persistence) is used to sample from the posterior
+distribution of the weights. The graph shows the underlying function,
+300 samples from the function given by the posterior distribution of the
+weights, and the average prediction (weighted by the posterior probabilities).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demkmn1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demkmn1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+
+
+
+Netlab Reference Manual demkmean
+
+
+
+
demkmean
+
+
+Purpose
+
+Demonstrate simple clustering model trained with K-means.
+
+
+Synopsis
+
+
+demkmean
+
+
+
+Description
+
+The problem consists of data in a two-dimensional space.
+The data is
+drawn from three spherical Gaussian distributions with priors 0.3,
+0.5 and 0.2; centres (2, 3.5), (0, 0) and (0,2); and standard deviations
+0.2, 0.5 and 1.0. The first figure contains a
+scatter plot of the data. The data is the same as in demgmm1.
+
+
A cluster model with three components is trained using the batch
+K-means algorithm. The matrix of centres is printed after training.
+The second
+figure shows the data labelled with a colour derived from the corresponding
+cluster.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demknn1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demknn1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+
+
+
+Netlab Reference Manual demknn1
+
+
+
+
demknn1
+
+
+Purpose
+
+Demonstrate nearest neighbour classifier.
+
+
+Synopsis
+
+
+demknn1
+
+
+
+Description
+
+The problem consists of data in a two-dimensional space.
+The data is
+drawn from three spherical Gaussian distributions with priors 0.3,
+0.5 and 0.2; centres (2, 3.5), (0, 0) and (0,2); and standard deviations
+0.2, 0.5 and 1.0. The first figure contains a
+scatter plot of the data. The data is the same as in demgmm1.
+
+
The second
+figure shows the data labelled with the corresponding class given
+by the classifier.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demmdn1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demmdn1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+
+
+
+Netlab Reference Manual demmdn1
+
+
+
+
demmdn1
+
+
+Purpose
+
+Demonstrate fitting a multi-valued function using a Mixture Density Network.
+
+
+Synopsis
+
+
+demmdn1
+
+
+
+Description
+
+The problem consists of one input variable
+x and one target variable t with data generated by
+sampling t at equal intervals and then generating target data by
+computing t + 0.3*sin(2*pi*t) and adding Gaussian noise. A
+Mixture Density Network with 3 centres in the mixture model is trained
+by minimizing a negative log likelihood error function using the scaled
+conjugate gradient optimizer.
+
+
The conditional means, mixing coefficients and variances are plotted
+as a function of x, and a contour plot of the full conditional
+density is also generated.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demmet1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demmet1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demmet1
+
+
+
+
demmet1
+
+
+Purpose
+
+Demonstrate Markov Chain Monte Carlo sampling on a Gaussian.
+
+
+Synopsis
+
+
+demmet1
+demmet1(plotwait)
+
+
+
+Description
+
+The problem consists of generating data from a Gaussian
+in two dimensions using a Markov Chain Monte Carlo algorithm. The points are
+plotted one after another to show the path taken by the chain.
+
+
demmet1(plotwait) allows the user to set the time (in a whole number
+of seconds) between the plotting of points. This is passed to pause.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demmlp1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demmlp1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual demmlp1
+
+
+
+
demmlp1
+
+
+Purpose
+
+Demonstrate simple regression using a multi-layer perceptron
+
+
+Synopsis
+
+
+demmlp1
+
+
+
+Description
+
+The problem consists of one input variable x and one target variable
+t with data generated by sampling x at equal intervals and then
+generating target data by computing sin(2*pi*x) and adding Gaussian
+noise. A 2-layer network with linear outputs is trained by minimizing a
+sum-of-squares error function using the scaled conjugate gradient optimizer.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demmlp2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demmlp2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+
+
+
+Netlab Reference Manual demmlp2
+
+
+
+
demmlp2
+
+
+Purpose
+
+Demonstrate simple classification using a multi-layer perceptron
+
+
+Synopsis
+
+
+demmlp2
+
+
+
+Description
+
+The problem consists of input data in two dimensions drawn from a mixture
+of three Gaussians: two of which are assigned to a single class. An MLP
+with logistic outputs trained with a quasi-Newton optimisation algorithm is
+compared with the optimal Bayesian decision rule.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demnlab.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demnlab.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+
+
+
+Netlab Reference Manual demnlab
+
+
+
+
demnlab
+
+
+Purpose
+
+A front-end Graphical User Interface to the demos
+
+
+Synopsis
+
+
+demnlab
+
+
+
+Description
+
+This function will start a user interface allowing the user to select
+different demonstration functions to view. The demos are divided into 4
+groups, with the demo being executed by selecting the desired option
+from a pop-up menu.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demns1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demns1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual demns1
+
+
+
+
demns1
+
+
+Purpose
+
+Demonstrate Neuroscale for visualisation.
+
+
+Synopsis
+
+
+demns1
+
+
+
+Description
+
+This script demonstrates the use of the Neuroscale algorithm for
+topographic projection and visualisation. A data sample is generated
+from a mixture of two Gaussians in 4d space, and an RBF is trained
+with the stress error function to project the data into 2d. The training
+data and a test sample are both plotted in this projection.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demolgd1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demolgd1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual demolgd1
+
+
+
+
demolgd1
+
+
+Purpose
+
+Demonstrate simple MLP optimisation with on-line gradient descent
+
+
+Synopsis
+
+
+demolgd1
+
+
+
+Description
+
+The problem consists of one input variable x and one target variable
+t with data generated by sampling x at equal intervals and then
+generating target data by computing sin(2*pi*x) and adding Gaussian
+noise. A 2-layer network with linear outputs is trained by minimizing a
+sum-of-squares error function using on-line gradient descent.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demopt1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demopt1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+
+
+
+Netlab Reference Manual demopt1
+
+
+
+
demopt1
+
+
+Purpose
+
+Demonstrate different optimisers on Rosenbrock's function.
+
+
+Synopsis
+
+
+demopt1
+demopt1(xinit)
+
+
+
+
+Description
+
+The four general optimisers (quasi-Newton, conjugate gradients,
+scaled conjugate gradients, and gradient descent) are applied to
+the minimisation of Rosenbrock's well known `banana' function.
+Each optimiser is run for at most 100 cycles, and a stopping
+criterion of 1.0e-4 is used for both position and function value.
+At the end, the trajectory of each algorithm is shown on a contour
+plot of the function.
+
+
demopt1(xinit) allows the user to specify a row vector with
+two columns as the starting point. The default is the point [-1 1].
+Note that the contour plot has an x range of [-1.5, 1.5] and a y
+range of [-0.5, 2.1], so it is best to choose a starting point in the
+same region.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/dempot.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/dempot.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+
+
+
+Netlab Reference Manual dempot
+
+
+
+
dempot
+
+
+Purpose
+
+Computes the negative log likelihood for a mixture model.
+
+
+Synopsis
+
+
+e = dempot(x, mix)
+
+
+
+Description
+
+This function computes the negative log of the unconditional data
+density p(x) for a Gaussian mixture model. The data structure
+mix defines the mixture model, while the matrix x contains
+the data vectors.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demprgp.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demprgp.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demprgp
+
+
+
+
demprgp
+
+
+Purpose
+
+Demonstrate sampling from a Gaussian Process prior.
+
+
+Synopsis
+
+
+demprgp
+
+
+
+Description
+
+This function plots the functions represented by a Gaussian Process
+model. The hyperparameter values can be adjusted
+on a linear scale using the sliders (though the exponential
+of the parameters is used in the covariance function), or
+by typing values into the text boxes and pressing the return key.
+Both types of covariance function are supported. An extra function
+specific parameter is needed for the rational quadratic function.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demprior.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demprior.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual demprior
+
+
+
+
demprior
+
+
+Purpose
+
+Demonstrate sampling from a multi-parameter Gaussian prior.
+
+
+Synopsis
+
+
+demprior
+
+
+
+Description
+
+This function plots the functions represented by a multi-layer perceptron
+network when the weights are set to values drawn from a Gaussian prior
+distribution. The parameters aw1, ab1, aw2 and ab2
+control the inverse variances of the first-layer weights, the hidden unit
+biases, the second-layer weights and the output unit biases respectively.
+Their values can be adjusted on a logarithmic scale using the sliders, or
+by typing values into the text boxes and pressing the return key.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demrbf1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demrbf1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+
+
+
+Netlab Reference Manual demrbf1
+
+
+
+
demrbf1
+
+
+Purpose
+
+Demonstrate simple regression using a radial basis function network.
+
+
+Synopsis
+
+
+demrbf1
+
+
+
+Description
+
+The problem consists of one input variable x and one target variable
+t with data generated by sampling x at equal intervals and then
+generating target data by computing sin(2*pi*x) and adding Gaussian
+noise. This data is the same as that used in demmlp1.
+
+
Three different RBF networks (with different activation functions)
+are trained in two stages. First, a Gaussian mixture model is trained using
+the EM algorithm, and the centres of this model are used to set the centres
+of the RBF. Second, the output weights (and biases) are determined using the
+pseudo-inverse of the design matrix.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demsom1.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demsom1.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual demsom1
+
+
+
+
demsom1
+
+
+Purpose
+
+Demonstrate SOM for visualisation.
+
+
+Synopsis
+
+
+demsom1
+
+
+
+Description
+
+
+This script demonstrates the use of a
+SOM with a two-dimensional grid to map onto data in
+two-dimensional space. Both on-line and batch training algorithms
+are shown.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/demtrain.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/demtrain.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+
+
+
+Netlab Reference Manual demtrain
+
+
+
+
demtrain
+
+
+Purpose
+
+Demonstrate training of MLP network.
+
+
+Synopsis
+
+
+demtrain
+
+
+
+Description
+
+demtrain brings up a simple GUI to show the training of
+an MLP network on classification and regression problems. The user
+should load in a dataset (which should be in Netlab format: see
+datread), select the output activation function, the
+ number of cycles and hidden units and then
+train the network. The scaled conjugate gradient algorithm is used.
+A graph shows the evolution of the error: the value is shown
+every max(ceil(iterations / 50), 5) cycles.
+
+
Once the network is trained, it is saved to the file mlptrain.net.
+The results can then be viewed as a confusion matrix (for classification
+problems) or a plot of output versus target (for regression problems).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/dist2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/dist2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+
+
+
+Netlab Reference Manual dist2
+
+
+
+
dist2
+
+
+Purpose
+
+Calculates squared distance between two sets of points.
+
+
+Synopsis
+
+
+d = dist2(x, c)
+
+
+
+
+Description
+
+d = dist2(x, c) takes two matrices of vectors and calculates the
+squared Euclidean distance between them. Both matrices must be of the
+same column dimension. If x has m rows and n columns, and
+c has l rows and n columns, then the result has
+m rows and l columns. The i, jth entry is the
+squared distance from the ith row of x to the jth
+row of c.
+
+
+Example
+
+The following code is used in rbffwd to calculate the activation of
+a thin plate spline function.
+
+
+evals = eigdec(x, N) computes the largest N eigenvalues of the
+matrix x in descending order. [evals, evec] = eigdec(x, N)
+also computes the corresponding eigenvectors.
+
+
+e = errbayes(net, edata) takes a network data structure
+net together with
+the data contribution to the error for a set of inputs and targets.
+It returns the regularised error using any zero mean Gaussian priors
+on the weights defined in
+net.
+
+
[e, edata, eprior] = errbayes(net, x, t) additionally returns the
+data and prior components of the error.
+
+
+[net] = evidence(net, x, t) re-estimates the
+hyperparameters alpha and beta by applying Bayesian
+re-estimation formulae for num iterations. The hyperparameter
+alpha can be a simple scalar associated with an isotropic prior
+on the weights, or can be a vector in which each component is
+associated with a group of weights as defined by the index
+matrix in the net data structure. These more complex priors can
+be set up for an MLP using mlpprior. Initial values for the iterative
+re-estimation are taken from the network data structure net
+passed as an input argument, while the return argument net
+contains the re-estimated values.
+
+
[net, gamma, logev] = evidence(net, x, t, num) allows the re-estimation
+formula to be applied for num cycles in which the re-estimated
+values for the hyperparameters from each cycle are used to re-evaluate
+the Hessian matrix for the next cycle. The return value gamma is
+the number of well-determined parameters and logev is the log
+of the evidence.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/fevbayes.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/fevbayes.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+
+
+
+Netlab Reference Manual fevbayes
+
+
+
+
fevbayes
+
+
+Purpose
+
+Evaluate Bayesian regularisation for network forward propagation.
+
+
+Synopsis
+
+
+extra = fevbayes(net, y, a, x, t, x_test)
+[extra, invhess] = fevbayes(net, y, a, x, t, x_test, invhess)
+
+
+
+
+Description
+
+extra = fevbayes(net, y, a, x, t, x_test) takes a network data structure
+net together with a set of hidden unit activations a from
+test inputs x_test, training data inputs x and t and
+outputs a matrix of extra information extra that consists of
+error bars (variance)
+for a regression problem or moderated outputs for a classification problem.
+The optional argument (and return value)
+invhess is the inverse of the network Hessian
+computed on the training data inputs and targets. Passing it in avoids
+recomputing it, which can be a significant saving for large training sets.
+
+
This is called by network-specific functions such as mlpevfwd which
+are needed since the return values (predictions and hidden unit activations)
+for different network types are in different orders (for good reasons).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gauss.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gauss.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual gauss
+
+
+
+
gauss
+
+
+Purpose
+
+Evaluate a Gaussian distribution.
+
+
+Synopsis
+
+
+y = gauss(mu, covar, x)
+
+
+
+
+Description
+
+
+
y = gauss(mu, covar, x) evaluates a multi-variate Gaussian
+density in d-dimensions at a set of points given by the rows
+of the matrix x. The Gaussian density has mean vector mu
+and covariance matrix covar.
+
+
+g = gbayes(net, gdata) takes a network data structure net together with
+the data contribution to the error gradient
+for a set of inputs and targets.
+It returns the regularised error gradient using any zero mean Gaussian priors
+on the weights defined in
+net. In addition, if a mask is defined in net, then
+the entries in g that correspond to weights with a 0 in the
+mask are removed.
+
+
[g, gdata, gprior] = gbayes(net, gdata) additionally returns the
+data and prior components of the error.
+
+
net = glm(nin, nout, func) takes the number of inputs
+and outputs for a generalized linear model, together
+with a string func which specifies the output unit activation function,
+and returns a data structure net. The weights are drawn from a zero mean,
+isotropic Gaussian, with variance scaled by the fan-in of the
+output units. This makes use of the Matlab function
+randn and so the seed for the random weight initialization can be
+set using randn('state', s) where s is the seed value. The optional
+argument alpha sets the inverse variance for the weight
+initialization.
+
+
The fields in net are
+
+ type = 'glm'
+ nin = number of inputs
+ nout = number of outputs
+ nwts = total number of weights and biases
+ actfn = string describing the output unit activation function:
+ 'linear'
+ 'logistic'
+ 'softmax'
+ w1 = first-layer weight matrix
+ b1 = first-layer bias vector
+
+
+
+
net = glm(nin, nout, func, prior), in which prior is
+a scalar, allows the field
+net.alpha in the data structure net to be set, corresponding
+to a zero-mean isotropic Gaussian prior with inverse variance with
+value prior. Alternatively, prior can consist of a data
+structure with fields alpha and index, allowing individual
+Gaussian priors to be set over groups of weights in the network. Here
+alpha is a column vector in which each element corresponds to a
+separate group of weights, which need not be mutually exclusive. The
+membership of the groups is defined by the matrix index in which
+the columns correspond to the elements of alpha. Each column has
+one element for each weight in the matrix, in the order defined by the
+function glmpak, and each element is 1 or 0 according to whether
+the weight is a member of the corresponding group or not.
+
+
net = glm(nin, nout, func, prior, beta) also sets the
+additional field net.beta in the data structure net, where
+beta corresponds to the inverse noise variance.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/glmderiv.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/glmderiv.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+
+
+
+Netlab Reference Manual glmderiv
+
+
+
+
glmderiv
+
+
+Purpose
+
+Evaluate derivatives of GLM outputs with respect to weights.
+
+
+Synopsis
+
+
+
+g = glmderiv(net, x)
+
+
+
+
+Description
+
+g = glmderiv(net, x) takes a network data structure net and a matrix
+of input vectors x and returns a three-index matrix g whose
+i, j, k
+element contains the derivative of network output k with respect to
+weight or bias parameter j for input pattern i. The ordering of the
+weight and bias parameters is defined by glmunpak.
+
+
+
+e = glmerr(net, x, t) takes a generalized
+linear model data structure net together with a matrix x
+of input vectors and a matrix t of target vectors, and evaluates
+the error function e. The choice of error function corresponds
+to the output unit activation function. Each row of x
+corresponds to one input vector and each row of t corresponds to
+one target vector.
+
+
[e, edata, eprior, y, a] = glmerr(net, x, t) also returns
+the data and prior components of the total error.
+
+
[e, edata, eprior, y, a] = glmerr(net, x) also returns a matrix y
+giving the outputs of the models and a matrix a
+giving the summed inputs to each output unit, where each row
+corresponds to one pattern.
+
+
+y = glmevfwd(net, x, t, x_test) takes a network data structure
+net together with the input x and target t training data
+and input test data x_test.
+It returns the normal forward propagation through the network y
+together with a matrix extra which consists of error bars (variance)
+for a regression problem or moderated outputs for a classification problem.
+
+
The optional argument (and return value)
+invhess is the inverse of the network Hessian
+computed on the training data inputs and targets. Passing it in avoids
+recomputing it, which can be a significant saving for large training sets.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/glmfwd.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/glmfwd.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+
+
+
+Netlab Reference Manual glmfwd
+
+
+
+
glmfwd
+
+
+Purpose
+
+Forward propagation through generalized linear model.
+
+
+Synopsis
+
+
+y = glmfwd(net, x)
+[y, a] = glmfwd(net, x)
+
+
+
+
+Description
+
+y = glmfwd(net, x) takes a generalized linear model
+data structure net together with
+a matrix x of input vectors, and forward propagates the inputs
+through the network to generate a matrix y of output
+vectors. Each row of x corresponds to one input vector and each
+row of y corresponds to one output vector.
+
+
[y, a] = glmfwd(net, x) also returns a matrix a
+giving the summed inputs to each output unit, where each row
+corresponds to one pattern.
+
+
+g = glmgrad(net, x, t) takes a generalized linear model
+data structure net
+together with a matrix x of input vectors and a matrix t
+of target vectors, and evaluates the gradient g of the error
+function with respect to the network weights. The error function
+corresponds to the choice of output unit activation function. Each row
+of x corresponds to one input vector and each row of t
+corresponds to one target vector.
+
+
[g, gdata, gprior] = glmgrad(net, x, t) also returns separately
+the data and prior contributions to the gradient.
+
+
+h = glmhess(net, x, t) takes a GLM network data structure net,
+a matrix x of input values, and a matrix t of target
+values and returns the full Hessian matrix h corresponding to
+the second derivatives of the negative log posterior distribution,
+evaluated for the current weight and bias values as defined by
+net. Note that the target data is not required in the calculation,
+but is included to make the interface uniform with nethess. For
+linear and logistic outputs, the computation is very simple and is
+done (in effect) in one line in glmtrain.
+
+
[h, hdata] = glmhess(net, x, t) returns both the Hessian matrix
+h and the contribution hdata arising from the data dependent
+term in the Hessian.
+
+
h = glmhess(net, x, t, hdata) takes a network data structure
+net, a matrix x of input values, and a matrix t of
+target values, together with the contribution hdata arising from
+the data dependent term in the Hessian, and returns the full Hessian
+matrix h corresponding to the second derivatives of the negative
+log posterior distribution. This version saves computation time if
+hdata has already been evaluated for the current weight and bias
+values.
+
+
+Example
+
+The Hessian matrix is used by glmtrain to take a Newton step for
+softmax outputs.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/glminit.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/glminit.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+
+
+
+Netlab Reference Manual glminit
+
+
+
+
glminit
+
+
+Purpose
+
+Initialise the weights in a generalized linear model.
+
+
+Synopsis
+
+
+net = glminit(net, prior)
+
+
+
+
+Description
+
+
+
net = glminit(net, prior) takes a generalized linear model
+net and sets the weights and biases by sampling from a Gaussian
+distribution. If prior is a scalar, then all of the parameters
+(weights and biases) are sampled from a single isotropic Gaussian with
+inverse variance equal to prior. If prior is a data
+structure similar to that in mlpprior but for a single layer of
+weights, then the parameters
+are sampled from multiple Gaussians according to their groupings
+(defined by the index field) with corresponding variances
+(defined by the alpha field).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/glmtrain.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/glmtrain.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+
+
+
+Netlab Reference Manual glmtrain
+
+
+
+
glmtrain
+
+
+Purpose
+
+Specialised training of generalized linear model
+
+
+Description
+
+net = glmtrain(net, options, x, t) uses
+the iterative reweighted least squares (IRLS)
+algorithm to set the weights in the generalized linear model structure
+net. This is a more efficient alternative to using glmerr
+and glmgrad and a non-linear optimisation routine through
+netopt.
+Note that for linear outputs, a single pass through the
+algorithm is all that is required, since the error function is quadratic in
+the weights. The algorithm also handles scalar alpha and beta
+terms. If you want to use more complicated priors, you should use
+general-purpose non-linear optimisation algorithms.
+
+
For logistic and softmax outputs, general priors can be handled, although
+this requires the pseudo-inverse of the Hessian, giving up the better
+conditioning and some of the speed advantage of the normal form equations.
+
+
The error function value at the final set of weights is returned
+in options(8).
+Each row of x corresponds to one
+input vector and each row of t corresponds to one target vector.
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values during training.
+If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is a measure of the precision required for the value
+of the weights w at the solution.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. Both this and the previous condition must be
+satisfied for termination.
+
+
options(5) is set to 1 if an approximation to the Hessian (which assumes
+that all outputs are independent) is used for softmax outputs. With the default
+value of 0 the exact Hessian (which is more expensive to compute) is used.
+
+
options(14) is the maximum number of iterations for the IRLS algorithm;
+default 100.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/glmunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/glmunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual glmunpak
+
+
+
+
glmunpak
+
+
+Purpose
+
+Separates weights vector into weight and bias matrices.
+
+
+Synopsis
+
+
+net = glmunpak(net, w)
+
+
+
+
+Description
+
+net = glmunpak(net, w) takes a glm network data structure net and
+a weight vector w, and returns a network data structure identical to
+the input network, except that the first-layer weight matrix
+w1 and the first-layer bias vector b1 have
+been set to the corresponding elements of w.
+
+
+
+mix = gmm(dim, ncentres, covartype) takes
+the dimension of the space dim, the number of centres in the
+mixture model and the type of the mixture model, and returns a data
+structure mix.
+The mixture model type defines the covariance structure of each component
+Gaussian:
+
+
+ 'spherical' = single variance parameter for each component: stored as a vector
+ 'diag' = diagonal matrix for each component: stored as rows of a matrix
+ 'full' = full matrix for each component: stored as 3d array
+ 'ppca' = probabilistic PCA: stored as principal components (in a 3d array)
+ and associated variances and off-subspace noise
+
+
+mix = gmm(dim, ncentres, covartype, ppca_dim) also sets the dimension of
+the PPCA sub-spaces: the default value is one.
+
+
The priors are initialised to equal values summing to one, and the covariances
+are all the identity matrix (or equivalent). The centres are
+initialised randomly from a zero mean unit variance Gaussian. This makes use
+of the MATLAB function randn and so the seed for the random weight
+initialisation can be set using randn('state', s) where s is the
+state value.
+
+
The fields in mix are
+
+
+ type = 'gmm'
+ nin = the dimension of the space
+ ncentres = number of mixture components
+ covartype = string for type of variance model
+ priors = mixing coefficients
+ centres = means of Gaussians: stored as rows of a matrix
+ covars = covariances of Gaussians
+
+
+The additional fields for mixtures of PPCA are
+
+
+ U = principal component subspaces
+ lambda = in-space covariances: stored as rows of a matrix
+
+
+The off-subspace noise is stored in covars.
+
+
+Example
+
+
+
+mix = gmm(2, 4, 'spherical');
+
+
+This creates a Gaussian mixture model with 4 components in 2 dimensions.
+The covariance structure is a spherical model.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gmmactiv.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gmmactiv.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+
+
+
+Netlab Reference Manual gmmactiv
+
+
+
+
gmmactiv
+
+
+Purpose
+
+Computes the activations of a Gaussian mixture model.
+
+
+Synopsis
+
+
+
+a = gmmactiv(mix, x)
+
+
+
+
+Description
+
+This function computes the activations a (i.e. the
+probability p(x|j) of the data conditioned on each component density)
+for a Gaussian mixture model. For the PPCA model, each activation
+is the conditional probability of x given that it is generated
+by the component subspace.
+The data structure mix defines the mixture model, while the matrix
+x contains the data vectors. Each row of x represents a single
+vector.
+
+
+[mix, options, errlog] = gmmem(mix, x, options) uses the Expectation
+Maximization algorithm of Dempster et al. to estimate the parameters of
+a Gaussian mixture model defined by a data structure mix.
+The matrix x represents the data whose expectation
+is maximized, with each row corresponding to a vector.
+
+The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog.
+If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(3) is a measure of the absolute precision required of the error
+function at the solution. If the change in log likelihood between two steps of
+the EM algorithm is less than this value, then the function terminates.
+
+
options(5) is set to 1 if a covariance matrix is reset to its
+original value when any of its singular values are too small (less
+than MIN_COVAR which has the value eps).
+With the default value of 0 no action is taken.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
The optional return value options contains the final error value
+(i.e. data log likelihood) in
+options(8).
+
+
+Examples
+
+The following code fragment sets up a Gaussian mixture model, initialises
+the parameters from the data, sets the options and trains the model.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gmminit.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gmminit.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+
+
+
+Netlab Reference Manual gmminit
+
+
+
+
gmminit
+
+
+Purpose
+
+Initialises Gaussian mixture model from data
+
+
+Synopsis
+
+
+
+mix = gmminit(mix, x, options)
+
+
+
+
+Description
+
+mix = gmminit(mix, x, options) uses a dataset x
+to initialise the parameters of a Gaussian mixture
+model defined by the data structure mix. The k-means algorithm
+is used to determine the centres. The priors are computed from the
+proportion of examples belonging to each cluster.
+The covariance matrices are calculated as the sample covariance of the
+points associated with (i.e. closest to) the corresponding centres.
+For a mixture of PPCA model, the PPCA decomposition is calculated
+for the points closest to a given centre.
+This initialisation can be used as the starting point for training the
+model using the EM algorithm.
+
+
+
+This code sets up a Gaussian mixture model with 3 centres in 2 dimensions, and
+then initialises the parameters from the data set data with 5 iterations
+of the k means algorithm.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gmmpost.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gmmpost.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual gmmpost
+
+
+
+
gmmpost
+
+
+Purpose
+
+Computes the class posterior probabilities of a Gaussian mixture model.
+
+
+Synopsis
+
+
+
+function post = gmmpost(mix, x)
+
+
+
+
+Description
+
+This function computes the posteriors post (i.e. the probability of each
+component conditioned on the data p(j|x)) for a Gaussian mixture model.
+The data structure mix defines the mixture model, while the matrix
+x contains the data vectors. Each row of x represents a single
+vector.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gmmprob.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gmmprob.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual gmmprob
+
+
+
+
gmmprob
+
+
+Purpose
+
+Computes the data probability for a Gaussian mixture model.
+
+
+Synopsis
+
+
+
+function prob = gmmprob(mix, x)
+
+
+
+
+Description
+
+
+This function computes the unconditional data
+density p(x) for a Gaussian mixture model. The data structure
+mix defines the mixture model, while the matrix x contains
+the data vectors. Each row of x represents a single vector.
+
+
data = gsamp(mix, n) generates a sample of size n from a
+Gaussian mixture distribution defined by the mix data
+structure. The matrix x has n
+rows in which each row represents a mix.nin-dimensional sample vector.
+
+
[data, label] = gmmsamp(mix, n) also returns a column vector of
+classes (as an index 1..N) label.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gmmunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gmmunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual gmmunpak
+
+
+
+
gmmunpak
+
+
+Purpose
+
+Separates a vector of Gaussian mixture model parameters into its components.
+
+
+Synopsis
+
+
+mix = gmmunpak(mix, p)
+
+
+
+
+Description
+
+mix = gmmunpak(mix, p)
+takes a GMM data structure mix and
+a single row vector of parameters p and returns a mixture data structure
+identical to the input mix, except that the mixing coefficients
+priors, centres centres and covariances covars
+(and, for PPCA, the lambdas and U (PCA sub-spaces)) are all set
+to the corresponding elements of p.
+
+
net = gp(nin, covarfn) takes the number of inputs nin
+for a Gaussian Process model with a single output, together
+with a string covarfn which specifies the type of the covariance function,
+and returns a data structure net. The parameters are set to zero.
+
+
The fields in net are
+
+ type = 'gp'
+ nin = number of inputs
+ nout = number of outputs: always 1
+ nwts = total number of weights and covariance function parameters
+ bias = logarithm of constant offset in covariance function
+ noise = logarithm of output noise variance
+ inweights = logarithm of inverse length scale for each input
+ covarfn = string describing the covariance function:
+ 'sqexp'
+ 'ratquad'
+ fpar = covariance function specific parameters (1 for squared exponential,
+ 2 for rational quadratic)
+ trin = training input data (initially empty)
+ trtargets = training target data (initially empty)
+
+
+
+
net = gp(nin, covarfn, prior) sets a Gaussian prior on the
+parameters of the model. prior must contain the fields
+pr_mean and pr_variance. If pr_mean is a scalar,
+then the Gaussian is assumed to be isotropic and the additional fields
+net.pr_mean and pr_variance are set. Otherwise,
+the Gaussian prior has a mean
+defined by a column vector of parameters prior.pr_mean and
+covariance defined by a column vector of parameters prior.pr_variance.
+Each element of pr_mean corresponds to a separate group of parameters, which
+need not be mutually exclusive. The membership of the groups is defined
+by the matrix prior.index in which the columns correspond to the elements of
+pr_mean. Each column has one element for each weight in the matrix,
+in the order defined by the function gppak, and each element
+is 1 or 0 according to whether the parameter is a member of the
+corresponding group or not. The additional field net.index is set
+in this case.
+
+
cov = gpcovar(net, x) takes
+a Gaussian Process data structure net together with
+a matrix x of input vectors, and computes the covariance
+matrix cov. The inverse of this matrix is used when calculating
+the mean and variance of the predictions made by net.
+
+
[cov, covf] = gpcovar(net, x) also generates the covariance
+matrix due to the covariance function specified by net.covarfn
+as calculated by gpcovarf.
+
+
+Example
+
+In the following example, the inverse covariance matrix is calculated
+for a set of training inputs x and is then
+passed to gpfwd so that predictions (with mean ytest and
+variance sigsq) can be made for the test inputs
+xtest.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gpcovarf.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gpcovarf.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual gpcovarf
+
+
+
+
gpcovarf
+
+
+Purpose
+
+Calculate the covariance function for a Gaussian Process.
+
+
+Synopsis
+
+
+covf = gpcovarf(net, x1, x2)
+
+
+
+
+Description
+
+
+
covf = gpcovarf(net, x1, x2) takes
+a Gaussian Process data structure net together with
+two matrices x1 and x2 of input vectors,
+and computes the matrix of the covariance function values
+covf.
+
+
covp = gpcovarp(net, x1, x2) takes
+a Gaussian Process data structure net together with
+two matrices x1 and x2 of input vectors,
+and computes the matrix of the prior covariance. This is
+the function component of the covariance plus the exponential of the bias
+term.
+
+
[covp, covf] = gpcovarp(net, x1, x2) also returns the function
+component of the covariance.
+
+
+e = gperr(net, x, t) takes a Gaussian Process data structure net together
+with a matrix x of input vectors and a matrix t of target
+vectors, and evaluates the error function e. Each row
+of x corresponds to one input vector and each row of t
+corresponds to one target vector.
+
+
[e, edata, eprior] = gperr(net, x, t) additionally returns the
+data and hyperprior components of the error, assuming a Gaussian
+prior on the weights with mean and variance parameters prmean and
+prvariance taken from the network data structure net.
+
+
+y = gpfwd(net, x) takes a Gaussian Process data structure net
+together
+with a matrix x of input vectors, and forward propagates the inputs
+through the model to generate a matrix y of output
+vectors. Each row of x corresponds to one input vector and each
+row of y corresponds to one output vector. This assumes that the
+training data (both inputs and targets) has been stored in net by
+a call to gpinit; these are needed to compute the training
+data covariance matrix.
+
+
[y, sigsq] = gpfwd(net, x) also generates a column vector sigsq of
+conditional variances (or squared error bars) where each value corresponds to a pattern.
+
+
[y, sigsq] = gpfwd(net, x, cninv) uses the pre-computed inverse covariance
+matrix cninv in the forward propagation. This increases efficiency if
+several calls to gpfwd are made.
+
+
+Example
+
+The following code creates a Gaussian Process, trains it, and then plots the
+predictions on a test set with one standard deviation error bars:
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gpgrad.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gpgrad.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual gpgrad
+
+
+
+
gpgrad
+
+
+Purpose
+
+Evaluate error gradient for Gaussian Process.
+
+
+Synopsis
+
+
+g = gpgrad(net, x, t)
+
+
+
+
+Description
+
+g = gpgrad(net, x, t) takes a Gaussian Process data structure net together
+with a matrix x of input vectors and a matrix t of target
+vectors, and evaluates the error gradient g. Each row
+of x corresponds to one input vector and each row of t
+corresponds to one target vector.
+
+
+net = gpinit(net, trin, trtargets) takes a Gaussian Process data structure net
+together
+with a matrix trin of training input vectors and a matrix trtargets of
+training target
+vectors, and stores them in net. These datasets are required if
+the corresponding inverse covariance matrix is not supplied to gpfwd.
+This is important if the data structure is saved and then reloaded before
+calling gpfwd.
+Each row
+of trin corresponds to one input vector and each row of trtargets
+corresponds to one target vector.
+
+
net = gpinit(net, trin, trtargets, prior) additionally initialises the
+parameters in net from the prior data structure which contains the
+mean and variance of the Gaussian distribution which is sampled from.
+
+
+Example
+
+Suppose that a Gaussian Process model is created and trained with input data x
+and targets t:
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gpunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gpunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual gpunpak
+
+
+
+
gpunpak
+
+
+Purpose
+
+Separates hyperparameter vector into components.
+
+
+Synopsis
+
+
+net = gpunpak(net, hp)
+
+
+
+
+Description
+
+net = gpunpak(net, hp) takes a Gaussian Process data structure net and
+a hyperparameter vector hp, and returns a Gaussian Process data structure
+identical to
+the input model, except that the covariance bias
+bias, output noise noise, the input weight vector
+inweights and the vector of covariance function specific parameters
+ fpar have all
+been set to the corresponding elements of hp.
+
+
+This function is intended as a utility for other netlab functions
+(particularly optimisation functions) to use. It enables the user
+to check whether a gradient calculation has been correctly implemented
+for a given function.
+gradchek(w, func, grad) checks how accurate the gradient
+grad of a function func is at a parameter vector x.
+A central
+difference formula with step size 1.0e-6 is used, and the results
+for both gradient function and finite difference approximation are
+printed.
+The optional return value gradient is the gradient calculated
+using the function grad and the return value delta is the
+difference between the functional and finite difference methods of
+calculating the gradient.
+
+
gradchek(x, func, grad, p1, p2, ...) allows additional arguments
+to be passed to func and grad.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/graddesc.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/graddesc.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+
+
+
+Netlab Reference Manual graddesc
+
+
+
+
graddesc
+
+
+Purpose
+
+Gradient descent optimization.
+
+
+Description
+
+[x, options, flog, pointlog] = graddesc(f, x, options, gradf) uses
+batch gradient descent to find a local minimum of the function
+f(x) whose gradient is given by gradf(x). A log of the function values
+after each cycle is (optionally) returned in errlog, and a log
+of the points visited is (optionally) returned in pointlog.
+
+
Note that x is a row vector
+and f returns a scalar value.
+The point at which f has a local minimum
+is returned as x. The function value at that point is returned
+in options(8).
+
+
graddesc(f, x, options, gradf, p1, p2, ...) allows
+additional arguments to be passed to f() and gradf().
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog, and the points visited
+in the return argument pointslog. If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is the absolute precision required for the value
+of x at the solution. If the absolute difference between
+the values of x between two successive steps is less than
+options(2), then this condition is satisfied.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. If the absolute difference between the
+objective function values between two successive steps is less than
+options(3), then this condition is satisfied.
+Both this and the previous condition must be
+satisfied for termination.
+
+
options(7) determines the line minimisation method used. If it
+is set to 1 then a line minimiser is used (in the direction of the negative
+gradient). If it is 0 (the default), then each parameter update
+is a fixed multiple (the learning rate)
+of the negative gradient added to a fixed multiple (the momentum) of
+the previous parameter update.
+
+
options(9) should be set to 1 to check the user defined gradient
+function gradf with gradchek. This is carried out at
+the initial parameter vector x.
+
+
options(10) returns the total number of function evaluations (including
+those in any line searches).
+
+
options(11) returns the total number of gradient evaluations.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
options(15) is the precision in parameter space of the line search;
+default foptions(2).
+
+
options(17) is the momentum; default 0.5. It should be scaled by the
+inverse of the number of data points.
+
+
options(18) is the learning rate; default 0.01. It should be
+scaled by the inverse of the number of data points.
+
+
+Examples
+
+An example of how this function can be used to train a neural network is:
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gsamp.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gsamp.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual gsamp
+
+
+
+
gsamp
+
+
+Purpose
+
+Sample from a Gaussian distribution.
+
+
+Synopsis
+
+
+x = gsamp(mu, covar, nsamp)
+
+
+
+
+Description
+
+
+
x = gsamp(mu, covar, nsamp) generates a sample of size nsamp
+from a d-dimensional Gaussian distribution. The Gaussian density
+has mean vector mu and covariance matrix covar, and the
+matrix x has nsamp rows in which each row represents a
+d-dimensional sample vector.
+
+
net = gtm(dimlatent, nlatent, dimdata, ncentres, rbfunc),
+takes the dimension of the latent space dimlatent, the
+number of data points sampled in the latent space nlatent, the
+dimension of the data space dimdata, the number of centres in the
+RBF model ncentres, the activation function for the RBF
+rbfunc
+and returns a data structure net. The parameters in the
+RBF and GMM sub-models are set by calls to the corresponding creation routines
+rbf and gmm.
+
+
The fields in net are
+
+ type = 'gtm'
+ nin = dimension of data space
+ dimlatent = dimension of latent space
+ rbfnet = RBF network data structure
+ gmmnet = GMM data structure
+ X = sample of latent points
+
+
+
+
net = gtm(dimlatent, nlatent, dimdata, ncentres, rbfunc, prior),
+ sets a Gaussian zero mean prior on the
+parameters of the RBF model. prior must be a scalar and represents
+the inverse variance of the prior distribution. This gives rise to
+a weight decay term in the error function.
+
+
+[net, options, errlog] = gtmem(net, t, options) uses the Expectation
+Maximization algorithm to estimate the parameters of
+a GTM defined by a data structure net.
+The matrix t represents the data whose expectation
+is maximized, with each row corresponding to a vector. It is assumed
+that the latent data net.X has been set following a call to
+gtminit, for example.
+
+The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog.
+If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(3) is a measure of the absolute precision required of the error
+function at the solution. If the change in log likelihood between two steps of
+the EM algorithm is less than this value, then the function terminates.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
The optional return value options contains the final error value
+(i.e. data log likelihood) in
+options(8).
+
+
+Examples
+
+The following code fragment sets up a GTM, initialises
+the latent data sample and RBF
+parameters from the data, sets the options and trains the model.
+
+
+% Create and initialise GTM model
+net = gtm(latentdim, nlatent, datadim, numrbfcentres, ...
+ 'gaussian', 0.1);
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gtmfwd.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gtmfwd.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual gtmfwd
+
+
+
+
gtmfwd
+
+
+Purpose
+
+Forward propagation through GTM.
+
+
+Synopsis
+
+
+mix = gtmfwd(net)
+
+
+
+
+Description
+
+
+mix = gtmfwd(net) takes a GTM
+structure net, and forward
+propagates the latent data sample net.X through the GTM to generate
+the structure
+mix which represents the Gaussian mixture model in data space.
+
+
+net = gtminit(net, options, data, samptype) takes a GTM net
+and generates a sample of latent data points and sets the centres (and
+widths if appropriate) of
+net.rbfnet.
+
+
If the samptype is 'regular', then regular grids of latent
+data points and RBF centres are created. The dimension of the latent data
+space must be
+1 or 2. For one-dimensional latent space, the lsampsize parameter
+gives the number of latent points and the rbfsampsize parameter
+gives the number of RBF centres. For a two-dimensional latent space,
+these parameters must be vectors of length 2 with the number of points
+in each of the x and y directions to create a rectangular grid. The
+widths of the RBF basis functions are set by a call to rbfsetfw
+passing options(7) as the scaling parameter.
+
+
If the samptype is 'uniform' or 'gaussian' then the
+latent data is found by sampling from a uniform or
+Gaussian distribution correspondingly. The RBF basis function parameters
+are set
+by a call to rbfsetbf with the data parameter
+as dataset and the options vector.
+
+
Finally, the output layer weights of the RBF are initialised by
+mapping the mean of the latent variable to the mean of the target variable,
+and the L-dimensional latent variable variance to the variance of the
+targets along the first L principal components.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gtmlmean.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gtmlmean.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual gtmlmean
+
+
+
+
gtmlmean
+
+
+Purpose
+
+Mean responsibility for data in a GTM.
+
+
+Synopsis
+
+
+means = gtmlmean(net, data)
+
+
+
+
+Description
+
+
+means = gtmlmean(net, data) takes a GTM
+structure net, and computes the means of the responsibility
+distributions for each data point in data.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gtmlmode.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gtmlmode.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual gtmlmode
+
+
+
+
gtmlmode
+
+
+Purpose
+
+Mode responsibility for data in a GTM.
+
+
+Synopsis
+
+
+modes = gtmlmode(net, data)
+
+
+
+
+Description
+
+
+modes = gtmlmode(net, data) takes a GTM
+structure net, and computes the modes of the responsibility
+distributions for each data point in data. These will always lie
+at one of the latent space sample points net.X.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/gtmmag.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/gtmmag.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual gtmmag
+
+
+
+
gtmmag
+
+
+Purpose
+
+Magnification factors for a GTM
+
+
+Synopsis
+
+
+mags = gtmmag(net, latentdata)
+
+
+
+
+Description
+
+
+mags = gtmmag(net, latentdata) takes a GTM
+structure net, and computes the magnification factors
+for each point in the latent space contained in latentdata.
+
+
+
+post = gtmpost(net, data) takes a GTM
+structure net, and computes the responsibility at each latent space
+sample point net.X
+for each data point in data.
+
+
[post, a] = gtmpost(net, data) also returns the activations
+a of the GMM net.gmmnet as computed by gmmpost.
+
+
+h = hbayes(net, hdata) takes a network data structure net together
+the data contribution to the Hessian
+for a set of inputs and targets.
+It returns the regularised Hessian using any zero mean Gaussian priors
+on the weights defined in
+net. In addition, if a mask is defined in net, then
+the entries in h that correspond to weights with a 0 in the
+mask are removed.
+
+
[h, hdata] = hbayes(net, hdata) additionally returns the
+data component of the Hessian.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/hesschek.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/hesschek.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual hesschek
+
+
+
+
hesschek
+
+
+Purpose
+
+Use central differences to confirm correct evaluation of Hessian matrix.
+
+
+Synopsis
+
+
+hesschek(net, x, t)
+h = hesschek(net, x, t)
+
+
+
+Description
+
+
+
hesschek(net, x, t) takes a network data structure net, together
+with input and target data matrices x and t, and compares
+the evaluation of the Hessian matrix using the function nethess
+and using central differences with the function neterr.
+
+
The optional return value h is the Hessian computed using
+nethess.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/hintmat.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/hintmat.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual hintmat
+
+
+
+
hintmat
+
+
+Purpose
+
+Evaluates the coordinates of the patches for a Hinton diagram.
+
+
+Synopsis
+
+
+[xvals, yvals, color] = hintmat(w)
+
+
+
+Description
+
+
+[xvals, yvals, color] = hintmat(w)
+
+takes a matrix w and
+returns coordinates xvals, yvals for the patches comprising the
+Hinton diagram, together with a vector color labelling the color
+(black or white) of the corresponding elements according to their
+sign.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/histp.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/histp.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual histp
+
+
+
+
histp
+
+
+Purpose
+
+Histogram estimate of 1-dimensional probability distribution.
+
+
+Synopsis
+
+
+h = histp(x, xmin, xmax, nbins)
+
+
+
+
+Description
+
+
+
histp(x, xmin, xmax, nbins) takes a column vector x
+of data values and generates a normalized histogram plot of the
+distribution. The histogram has nbins bins lying in the
+range xmin to xmax.
+
+
h = histp(...) returns a vector of patch handles.
+
+
+samples = hmc(f, x, options, gradf) uses a
+hybrid Monte Carlo algorithm to sample from the distribution p ~ exp(-f),
+where f is the first argument to hmc.
+The Markov chain starts at the point x, and the function gradf
+is the gradient of the `energy' function f.
+
+
hmc(f, x, options, gradf, p1, p2, ...) allows
+additional arguments to be passed to f() and gradf().
+
+
[samples, energies, diagn] = hmc(f, x, options, gradf) also returns
+a log of the energy values (i.e. negative log probabilities) for the
+samples in energies and diagn, a structure containing
+diagnostic information (position, momentum and
+acceptance threshold) for each step of the chain in diagn.pos,
+diagn.mom and
+diagn.acc respectively. All candidate states (including rejected ones)
+are stored in diagn.pos.
+
+
[samples, energies, diagn] = hmc(f, x, options, gradf) also returns the
+energies (i.e. negative log probabilities) corresponding to the samples.
+The diagn structure contains three fields:
+
+
pos the position vectors of the dynamic process.
+
+
mom the momentum vectors of the dynamic process.
+
+
acc the acceptance thresholds.
+
+
s = hmc('state') returns a state structure that contains the state of the
+two random number generators rand and randn and the momentum of
+the dynamic process. These are contained in fields
+randstate, randnstate
+and mom respectively. The momentum state is
+only used for a persistent momentum update.
+
+
hmc('state', s) resets the state to s. If s is an integer,
+then it is passed to rand and randn and the momentum variable
+is randomised. If s is a structure returned by hmc('state') then
+it resets the generator to exactly the same state.
+
+
The optional parameters in the options vector have the following
+interpretations.
+
+
options(1) is set to 1 to display the energy values and rejection
+threshold at each step of the Markov chain. If the value is 2, then the
+position vectors at each step are also displayed.
+
+
options(5) is set to 1 if momentum persistence is used; default 0, for
+complete replacement of momentum variables.
+
+
options(7) defines the trajectory length (i.e. the number of leap-frog
+steps at each iteration). Minimum value 1.
+
+
options(9) is set to 1 to check the user defined gradient function.
+
+
options(14) is the number of samples retained from the Markov chain;
+default 100.
+
+
options(15) is the number of samples omitted from the start of the
+chain; default 0.
+
+
options(17) defines the momentum used when a persistent update of
+(leap-frog) momentum is used. This is bounded to the interval [0, 1).
+
+
options(18) is the step size used in leap-frogs; default 1/trajectory
+length.
+
+
+Examples
+
+The following code fragment samples from the posterior distribution of
+weights for a neural network.
+
+
+The algorithm follows the procedure outlined in Radford Neal's technical
+report CRG-TR-93-1 from the University of Toronto. The stochastic update of
+momenta samples from a zero mean unit covariance gaussian.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/index.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/index.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,537 @@
+
+
+
+NETLAB Reference Documentation
+
+
+
+
NETLAB Online Reference Documentation
+Welcome to the NETLAB online reference documentation.
+The NETLAB simulation software is designed to provide all the tools necessary
+for principled and theoretically well founded application development. The
+NETLAB library is based on the approach and techniques described in Neural
+Networks for Pattern Recognition (Bishop, 1995). The library includes software
+implementations of a wide range of data analysis techniques, many of which are
+not widely available, and are rarely, if ever, included in standard neural
+network simulation packages.
+
The online reference documentation provides direct hypertext links to specific Netlab function descriptions.
+
Copyright (c) Christopher M Bishop, Ian T Nabney (1996, 1997)
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/kmeans.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/kmeans.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,89 @@
+
+
+
+centres = kmeans(centres, data, options)
+uses the batch K-means algorithm to set the centres of a cluster model.
+The matrix data represents the data
+which is being clustered, with each row corresponding to a vector.
+The sum of squares error function is used. The point at which
+a local minimum is achieved is returned as centres. The
+error value at that point is returned in options(8).
+
+
[centres, options, post, errlog] = kmeans(centres, data, options)
+also returns the cluster number (in a one-of-N encoding) for each data
+point in post and a log of the error values after each cycle in
+errlog.
+
+The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog.
+If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is a measure of the absolute precision required for the value
+of centres at the solution. If the absolute difference between
+the values of centres between two successive steps is less than
+options(2), then this condition is satisfied.
+
+
options(3) is a measure of the precision required of the error
+function at the solution. If the absolute difference between the
+error functions between two successive steps is less than
+options(3), then this condition is satisfied.
+Both this and the previous condition must be
+satisfied for termination.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
+Example
+
+kmeans can be used to initialise the centres of a Gaussian
+mixture model that is then trained with the EM algorithm.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/knn.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/knn.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,54 @@
+
+
+
+Netlab Reference Manual knn
+
+
+
+
knn
+
+
+Purpose
+
+Creates a K-nearest-neighbour classifier.
+
+
+Synopsis
+
+
+
+net = knn(nin, nout, k, tr_in, tr_targets)
+
+
+
+
+Description
+
+net = knn(nin, nout, k, tr_in, tr_targets) creates a KNN model net
+with input dimension nin, output dimension nout and k
+neighbours. The training data is also stored in the data structure and the
+targets are assumed to be using a 1-of-N coding.
+
+
The fields in net are
+
+
+ type = 'knn'
+ nin = number of inputs
+ nout = number of outputs
+ tr_in = training input data
+ tr_targets = training target data
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/knnfwd.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/knnfwd.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+
+
+
+Netlab Reference Manual knnfwd
+
+
+
+
knnfwd
+
+
+Purpose
+
+Forward propagation through a K-nearest-neighbour classifier.
+
+
+Synopsis
+
+
+
+[y, l] = knnfwd(net, x)
+
+
+
+
+Description
+
+[y, l] = knnfwd(net, x) takes a matrix x
+of input vectors (one vector per row)
+ and uses the k-nearest-neighbour rule on the training data contained
+in net to
+produce
+a matrix y of outputs and a matrix l of classification
+labels.
+The nearest neighbours are determined using Euclidean distance.
+The ijth entry of y counts the number of occurrences that
+an example from class j is among the k closest training
+examples to example i from x.
+The matrix l contains the predicted class labels
+as an index 1..N, not as 1-of-N coding.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/linef.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/linef.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual linef
+
+
+
+
linef
+
+
+Purpose
+
+Calculate function value along a line.
+
+
+Description
+
+linef(lambda, fn, x, d) calculates the value of the function
+fn at the point x+lambda*d. Here x is a row vector
+and lambda is a scalar.
+
+
linef(lambda, fn, x, d, p1, p2, ...) allows additional
+arguments to be passed to fn().
+This function is used for convenience in some of the optimisation routines.
+
+
+Examples
+
+An example of
+the use of this function can be found in the function linemin.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/linemin.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/linemin.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,74 @@
+
+
+
+Netlab Reference Manual linemin
+
+
+
+
linemin
+
+
+Purpose
+
+One dimensional minimization.
+
+
+Description
+
+[x, options] = linemin(f, pt, dir, fpt, options) uses Brent's
+algorithm to find the minimum of the function f(x) along the
+line dir through the point pt. The function value at the
+starting point is fpt. The point at which f has a local minimum
+is returned as x. The function value at that point is returned
+in options(8).
+
+
linemin(f, pt, dir, fpt, options, p1, p2, ...) allows
+additional arguments to be passed to f().
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values.
+
+
options(2) is a measure of the absolute precision required for the value
+of x at the solution.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. Both this and the previous condition must be
+satisfied for termination.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
+Examples
+
+An example of the use of this function to find the minimum of a function
+f in the direction sd can be found in conjgrad
+
+
+x = linemin(f, xold, sd, fold, lineoptions);
+
+
+
+
+Algorithm
+
+
+Brent's algorithm uses a mixture of quadratic interpolation and golden
+section search to find the minimum of a function of a single variable once
+it has been bracketed (which is done with minbrack). This is adapted
+to minimize a function along a line.
+This implementation
+is based on that in Numerical Recipes.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/maxitmess.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/maxitmess.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
+
+
+
+Netlab Reference Manual maxitmess
+
+
+
+
maxitmess
+
+
+Purpose
+
+Create a standard error message when training reaches max. iterations.
+
+
+Synopsis
+
+
+s = maxitmess
+
+
+
+
+Description
+
+s = maxitmess returns a standard string that it used by training
+algorithms when the maximum number of iterations (as specified in
+options(14)) is reached.
+
+
+net = mdn(nin, nhidden, ncentres, dimtarget) takes the number of
+inputs,
+hidden units for a 2-layer feed-forward
+network and the number of centres and target dimension for the
+mixture model whose parameters are set from the outputs of the neural network.
+The fifth argument mixtype is used to define the type of mixture
+model. (Currently there is only one type supported: a mixture of Gaussians with
+a single covariance parameter for each component.) For this model,
+the mixture coefficients are computed from a group of softmax outputs,
+the centres are equal to a group of linear outputs, and the variances are
+obtained by applying the exponential function to a third group of outputs.
+
+
The network is initialised by a call to mlp, and the arguments
+prior, and beta have the same role as for that function.
+Weight initialisation uses the Matlab function randn
+ and so the seed for the random weight initialization can be
+set using randn('state', s) where s is the seed value.
+A specialised data structure (rather than gmm)
+is used for the mixture model outputs to improve
+the efficiency of error and gradient calculations in network training.
+The fields are described in mdnfwd where they are set up.
+
+
The fields in net are
+
+
+ type = 'mdn'
+ nin = number of input variables
+ nout = dimension of target space (not number of network outputs)
+ nwts = total number of weights and biases
+ mdnmixes = data structure for mixture model output
+ mlp = data structure for MLP network
+
+
+
+
+Example
+
+
+
+net = mdn(2, 4, 3, 1, 'spherical');
+
+
+This creates a Mixture Density Network with 2 inputs and 4 hidden units.
+The mixture model has 3 components and the target space has dimension 1.
+
+
David J Evans (1998)
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mdn2gmm.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mdn2gmm.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+
+
+
+Netlab Reference Manual mdn2gmm
+
+
+
+
mdn2gmm
+
+
+Purpose
+
+Converts an MDN mixture data structure to array of GMMs.
+
+
+Synopsis
+
+
+gmmmixes = mdn2gmm(mdnmixes)
+
+
+
+
+Description
+
+gmmmixes = mdn2gmm(mdnmixes) takes an MDN mixture data structure
+mdnmixes
+containing three matrices (for priors, centres and variances) where each
+row represents the corresponding parameter values for a different mixture model
+and creates an array of GMMs. These can then be used with the standard
+Netlab Gaussian mixture model functions.
+
+
+
+This creates an array GMM mixture models (one for each data point in
+x). The vector p is then filled with the conditional
+probabilities of the values y given x(1,:).
+
+
David J Evans (1998)
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mdndist2.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mdndist2.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+
+
+
+Netlab Reference Manual mdndist2
+
+
+
+
mdndist2
+
+
+Purpose
+
+Calculates squared distance between centres of Gaussian kernels and data
+
+
+Synopsis
+
+
+n2 = mdndist2(mixparams, t)
+
+
+
+
+Description
+
+n2 = mdndist2(mixparams, t) takes the centres of the Gaussian
+contained in
+ mixparams and the target data matrix, t, and computes the squared
+Euclidean distance between them. If t has m rows and n
+columns, then the centres field in
+the mixparams structure should have m rows and
+n*mixparams.ncentres columns: the centres in each row relate to
+the corresponding row in t.
+The result has m rows and mixparams.ncentres columns.
+The i, jth entry is the
+squared distance from the ith row of x to the jth
+centre in the ith row of mixparams.centres.
+
+
David J Evans (1998)
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mdnerr.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mdnerr.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual mdnerr
+
+
+
+
mdnerr
+
+
+Purpose
+
+Evaluate error function for Mixture Density Network.
+
+
+Synopsis
+
+
+e = mdnerr(net, x, t)
+
+
+
+
+Description
+
+
+e = mdnerr(net, x, t) takes a mixture density network data
+structure net, a matrix x of input vectors and a matrix
+t of target vectors, and evaluates the error function
+e. The error function is the negative log likelihood of the
+target data under the conditional density given by the mixture model
+parameterised by the MLP. Each row of x corresponds to one
+input vector and each row of t corresponds to one target vector.
+
+
David J Evans (1998)
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mdnfwd.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mdnfwd.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,71 @@
+
+
+
+Netlab Reference Manual mdnfwd
+
+
+
+
mdnfwd
+
+
+Purpose
+
+Forward propagation through Mixture Density Network.
+
+
+Synopsis
+
+
+mixparams = mdnfwd(net, x)
+[mixparams, y, z] = mdnfwd(net, x)
+[mixparams, y, z, a] = mdnfwd(net, x)
+
+
+
+
+Description
+
+
+mixparams = mdnfwd(net, x) takes a mixture density network data
+structure net and a matrix x of input vectors, and forward
+propagates the inputs through the network to generate a structure
+mixparams which contains the parameters of several mixture models.
+Each row of x represents
+one input vector and the corresponding row of the matrices in mixparams
+represents the parameters of a mixture model for the conditional probability
+of target vectors given the input vector. This is not represented as an array
+of gmm structures to improve the efficiency of MDN training.
+
+
The fields in mixparams are
+
+
+ type = 'mdnmixes'
+ ncentres = number of mixture components
+ dimtarget = dimension of target space
+ mixcoeffs = mixing coefficients
+ centres = means of Gaussians: stored as one row per pattern
+ covars = covariances of Gaussians
+ nparams = number of parameters
+
+
+
+
[mixparams, y, z] = mdnfwd(net, x) also generates a matrix y of
+the outputs of the MLP and a matrix z of the hidden
+unit activations where each row corresponds to one pattern.
+
+
[mixparams, y, z, a] = mlpfwd(net, x) also returns a matrix a
+giving the summed inputs to each output unit, where each row
+corresponds to one pattern.
+
+
David J Evans (1998)
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mdngrad.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mdngrad.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+
+
+
+Netlab Reference Manual mdngrad
+
+
+
+
mdngrad
+
+
+Purpose
+
+Evaluate gradient of error function for Mixture Density Network.
+
+
+Synopsis
+
+
+
+g = mdngrad(net, x, t)
+
+
+
+
+Description
+
+
+g = mdngrad(net, x, t) takes a mixture density network data
+structure net, a matrix x of input vectors and a matrix
+t of target vectors, and evaluates the gradient g of the
+error function with respect to the network weights. The error function
+is negative log likelihood of the target data. Each row of x
+corresponds to one input vector and each row of t corresponds to
+one target vector.
+
+
net = mdninit(net, prior) takes a Mixture Density Network
+net and sets the weights and biases by sampling from a Gaussian
+distribution. It calls mlpinit for the MLP component of net.
+
+
net = mdninit(net, prior, t, options) uses the target data t to
+initialise the biases for the output units after initialising the
+other weights as above. It calls gmminit, with t and options
+as arguments, to obtain a model of the unconditional density of t. The
+biases are then set so that net will output the values in the Gaussian
+mixture model.
+
+
+post = mdnpost(mixparams, t) computes the posterior
+probability p(j|t) of each
+data vector in t under the Gaussian mixture model represented by the
+corresponding entries in mixparams. Each row of t represents a
+single vector.
+
+
[post, a] = mdnpost(mixparams, t) also computes the activations
+a (i.e. the probability p(t|j) of the data conditioned on
+each component density) for a Gaussian mixture model.
+
+
+prob = mdnprob(mixparams, t) computes the probability p(t) of each
+data vector in t under the Gaussian mixture model represented by the
+corresponding entries in mixparams. Each row of t represents a
+single vector.
+
+
[prob, a] = mdnprob(mixparams, t) also computes the activations
+a (i.e. the probability p(t|j) of the data conditioned on
+each component density) for a Gaussian mixture model.
+
+
David J Evans (1998)
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mdnunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mdnunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual mdnunpak
+
+
+
+
mdnunpak
+
+
+Purpose
+
+Separates weights vector into weight and bias matrices.
+
+
+Synopsis
+
+
+net = mdnunpak(net, w)
+
+
+
+
+Description
+
+net = mdnunpak(net, w) takes an mdn network data structure net and
+a weight vector w, and returns a network data structure identical to
+the input network, except that the weights in the MLP sub-structure are
+set to the corresponding elements of w.
+
+
+
+samples = metrop(f, x, options) uses
+the Metropolis algorithm to sample from the distribution
+p ~ exp(-f), where f is the first argument to metrop.
+The Markov chain starts at the point x and each
+candidate state is picked from a Gaussian proposal distribution and
+accepted or rejected according to the Metropolis criterion.
+
+
samples = metrop(f, x, options, [], p1, p2, ...) allows
+additional arguments to be passed to f(). The fourth argument is
+ignored, but is included for compatibility with hmc and the
+optimisers.
+
+
[samples, energies, diagn] = metrop(f, x, options) also returns
+a log of the energy values (i.e. negative log probabilities) for the
+samples in energies and diagn, a structure containing
+diagnostic information (position and
+acceptance threshold) for each step of the chain in diagn.pos and
+diagn.acc respectively. All candidate states (including rejected
+ones) are stored in diagn.pos.
+
+
s = metrop('state') returns a state structure that contains the
+state of the two random number generators rand and randn.
+These are contained in fields
+randstate,
+randnstate.
+
+
metrop('state', s) resets the state to s. If s is an integer,
+then it is passed to rand and randn.
+If s is a structure returned by metrop('state') then
+it resets the generator to exactly the same state.
+
+
The optional parameters in the options vector have the following
+interpretations.
+
+
options(1) is set to 1 to display the energy values and rejection
+threshold at each step of the Markov chain. If the value is 2, then the
+position vectors at each step are also displayed.
+
+
options(14) is the number of samples retained from the Markov chain;
+default 100.
+
+
options(15) is the number of samples omitted from the start of the
+chain; default 0.
+
+
options(18) is the variance of the proposal distribution; default 1.
+
+
+Examples
+
+The following code fragment samples from the posterior distribution of
+weights for a neural network.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/minbrack.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/minbrack.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+
+
+
+Netlab Reference Manual minbrack
+
+
+
+
minbrack
+
+
+Purpose
+
+Bracket a minimum of a function of one variable.
+
+
+Description
+
+[brmin, brmid, brmax, numevals] = minbrack(f, a, b, fa)
+finds a bracket of three points around a local minimum of
+f. The function f must have a one dimensional domain.
+a < b is an initial guess at the minimum and maximum points
+of a bracket, but minbrack will search outside this interval if
+necessary. The bracket consists of three points (in increasing order)
+such that f(brmid) < f(brmin) and f(brmid) < f(brmax).
+fa is the value of the function at a: it is included to
+avoid unnecessary function evaluations in the optimization routines.
+The return value numevals is the number of function evaluations
+in minbrack.
+
+
minbrack(f, a, b, fa, p1, p2, ...) allows additional
+arguments to be passed to f
+
+
+Examples
+
+An example of the use of this function to bracket the minimum of a function
+f in the direction sd can be found in linemin
+
+
+where the function linef is used to turn a general function f
+into a one dimensional one.
+
+
+Algorithm
+
+
+Quadratic extrapolation with a limit to the maximum step size is
+used to find the outside points of the bracket. This implementation
+is based on that in Numerical Recipes.
+
+
+net = mlp(nin, nhidden, nout, func) takes the number of inputs,
+hidden units and output units for a 2-layer feed-forward network,
+together with a string func which specifies the output unit
+activation function, and returns a data structure net. The
+weights are drawn from a zero mean, unit variance isotropic Gaussian,
+with variances scaled by the fan-in of the hidden or output units as
+appropriate. This makes use of the Matlab function
+randn and so the seed for the random weight initialization can be
+set using randn('state', s) where s is the seed value.
+The hidden units use the tanh activation function.
+
+
The fields in net are
+
+
+ type = 'mlp'
+ nin = number of inputs
+ nhidden = number of hidden units
+ nout = number of outputs
+ nwts = total number of weights and biases
+ actfn = string describing the output unit activation function:
+ 'linear'
+ 'logistic'
+ 'softmax'
+ w1 = first-layer weight matrix
+ b1 = first-layer bias vector
+ w2 = second-layer weight matrix
+ b2 = second-layer bias vector
+
+
+Here w1 has dimensions nin times nhidden, b1 has
+dimensions 1 times nhidden, w2 has
+dimensions nhidden times nout, and b2 has
+dimensions 1 times nout.
+
+
net = mlp(nin, nhidden, nout, func, prior), in which prior is
+a scalar, allows the field net.alpha in the data structure
+net to be set, corresponding to a zero-mean isotropic Gaussian
+prior with inverse variance with value prior. Alternatively,
+prior can consist of a data structure with fields alpha
+and index, allowing individual Gaussian priors to be set over
+groups of weights in the network. Here alpha is a column vector
+in which each element corresponds to a separate group of weights,
+which need not be mutually exclusive. The membership of the groups is
+defined by the matrix index in which the columns correspond to
+the elements of alpha. Each column has one element for each
+weight in the matrix, in the order defined by the function
+mlppak, and each element is 1 or 0 according to whether the
+weight is a member of the corresponding group or not. A utility
+function mlpprior is provided to help in setting up the
+prior data structure.
+
+
net = mlp(nin, nhidden, nout, func, prior, beta) also sets the
+additional field net.beta in the data structure net, where
+beta corresponds to the inverse noise variance.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlpbkp.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlpbkp.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+
+
+
+Netlab Reference Manual mlpbkp
+
+
+
+
mlpbkp
+
+
+Purpose
+
+Backpropagate gradient of error function for 2-layer network.
+
+
+Synopsis
+
+
+g = mlpbkp(net, x, z, deltas)
+
+
+
+Description
+
+g = mlpbkp(net, x, z, deltas) takes a network data structure
+net together with a matrix x of input vectors, a matrix
+z of hidden unit activations, and a matrix deltas of the
+gradient of the error function with respect to the values of the
+output units (i.e. the summed inputs to the output units, before the
+activation function is applied). The return value is the gradient
+g of the error function with respect to the network
+weights. Each row of x corresponds to one input vector.
+
+
This function is provided so that the common backpropagation algorithm
+can be used by multi-layer perceptron network models to compute
+gradients for mixture density networks as well as standard error
+functions.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlpderiv.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlpderiv.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual mlpderiv
+
+
+
+
mlpderiv
+
+
+Purpose
+
+Evaluate derivatives of network outputs with respect to weights.
+
+
+Synopsis
+
+
+g = mlpderiv(net, x)
+
+
+
+Description
+
+g = mlpderiv(net, x) takes a network data structure net
+and a matrix of input vectors x and returns a three-index matrix
+g whose i, j, k element contains the
+derivative of network output k with respect to weight or bias
+parameter j for input pattern i. The ordering of the
+weight and bias parameters is defined by mlpunpak.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlperr.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlperr.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+
+
+
+Netlab Reference Manual mlperr
+
+
+
+
mlperr
+
+
+Purpose
+
+Evaluate error function for 2-layer network.
+
+
+Synopsis
+
+
+e = mlperr(net, x, t)
+
+
+
+
+Description
+
+e = mlperr(net, x, t) takes a network data structure net together
+with a matrix x of input vectors and a matrix t of target
+vectors, and evaluates the error function e. The choice of error
+function corresponds to the output unit activation function. Each row
+of x corresponds to one input vector and each row of t
+corresponds to one target vector.
+
+
[e, edata, eprior] = mlperr(net, x, t) additionally returns the
+data and prior components of the error, assuming a zero mean Gaussian
+prior on the weights with inverse variance parameters alpha and
+beta taken from the network data structure net.
+
+
+y = mlpevfwd(net, x, t, x_test) takes a network data structure
+net together with the input x and target t training data
+and input test data x_test.
+It returns the normal forward propagation through the network y
+together with a matrix extra which consists of error bars (variance)
+for a regression problem or moderated outputs for a classification problem.
+The optional argument (and return value)
+invhess is the inverse of the network Hessian
+computed on the training data inputs and targets. Passing it in avoids
+recomputing it, which can be a significant saving for large training sets.
+
+
+y = mlpfwd(net, x) takes a network data structure net together with
+a matrix x of input vectors, and forward propagates the inputs
+through the network to generate a matrix y of output
+vectors. Each row of x corresponds to one input vector and each
+row of y corresponds to one output vector.
+
+
[y, z] = mlpfwd(net, x) also generates a matrix z of the hidden
+unit activations where each row corresponds to one pattern.
+
+
[y, z, a] = mlpfwd(net, x) also returns a matrix a
+giving the summed inputs to each output unit, where each row
+corresponds to one pattern.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlpgrad.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlpgrad.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+
+
+
+Netlab Reference Manual mlpgrad
+
+
+
+
mlpgrad
+
+
+Purpose
+
+Evaluate gradient of error function for 2-layer network.
+
+
+Synopsis
+
+
+
+g = mlpgrad(net, x, t)
+
+
+
+
+Description
+
+g = mlpgrad(net, x, t) takes a network data structure net
+together with a matrix x of input vectors and a matrix t
+of target vectors, and evaluates the gradient g of the error
+function with respect to the network weights. The error function
+corresponds to the choice of output unit activation function. Each row
+of x corresponds to one input vector and each row of t
+corresponds to one target vector.
+
+
[g, gdata, gprior] = mlpgrad(net, x, t) also returns separately
+the data and prior contributions to the gradient. In the case of
+multiple groups in the prior, gprior is a matrix with a row
+for each group and a column for each weight parameter.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlphdotv.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlphdotv.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+
+
+
+Netlab Reference Manual mlphdotv
+
+
+
+
mlphdotv
+
+
+Purpose
+
+Evaluate the product of the data Hessian with a vector.
+
+
+Synopsis
+
+
+hdv = mlphdotv(net, x, t, v)
+
+
+
+Description
+
+
+
hdv = mlphdotv(net, x, t, v) takes an MLP network data structure
+net, together with the matrix x of input vectors, the
+matrix t of target vectors and an arbitrary row vector v
+whose length equals the number of parameters in the network, and
+returns the product of the data-dependent contribution to the Hessian
+matrix with v. The implementation is based on the R-propagation
+algorithm of Pearlmutter.
+
+
+h = mlphess(net, x, t) takes an MLP network data structure net,
+a matrix x of input values, and a matrix t of target
+values and returns the full Hessian matrix h corresponding to
+the second derivatives of the negative log posterior distribution,
+evaluated for the current weight and bias values as defined by
+net.
+
+
[h, hdata] = mlphess(net, x, t) returns both the Hessian matrix
+h and the contribution hdata arising from the data dependent
+term in the Hessian.
+
+
h = mlphess(net, x, t, hdata) takes a network data structure
+net, a matrix x of input values, and a matrix t of
+target values, together with the contribution hdata arising from
+the data dependent term in the Hessian, and returns the full Hessian
+matrix h corresponding to the second derivatives of the negative
+log posterior distribution. This version saves computation time if
+hdata has already been evaluated for the current weight and bias
+values.
+
+
+Example
+
+For the standard regression framework with a Gaussian conditional
+distribution of target values given input values, and a simple
+Gaussian prior over weights, the Hessian takes the form
+
+
+ h = beta*hd + alpha*I
+
+
+where the contribution hd is evaluated by calls to mlphdotv and
+h is the full Hessian.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlphint.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlphint.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+
+
+
+Netlab Reference Manual mlphint
+
+
+
+
mlphint
+
+
+Purpose
+
+Plot Hinton diagram for 2-layer feed-forward network.
+
+
+Synopsis
+
+
+mlphint(net)
+[h1, h2] = mlphint(net)
+
+
+
+Description
+
+
+
mlphint(net) takes a network structure net
+and plots the Hinton diagram comprised of two
+figure windows, one displaying the first-layer weights and biases, and
+one displaying the second-layer weights and biases.
+
+
[h1, h2] = mlphint(net) also returns handles h1 and
+h2 to the figures which can be used, for instance, to delete the
+figures when they are no longer needed.
+
+
To print the figure correctly, you should call
+set(h, 'InvertHardCopy', 'on') before printing.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlpinit.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlpinit.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+
+
+
+Netlab Reference Manual mlpinit
+
+
+
+
mlpinit
+
+
+Purpose
+
+Initialise the weights in a 2-layer feedforward network.
+
+
+Synopsis
+
+
+net = mlpinit(net, prior)
+
+
+
+
+Description
+
+
+
net = mlpinit(net, prior) takes a 2-layer feedforward network
+net and sets the weights and biases by sampling from a Gaussian
+distribution. If prior is a scalar, then all of the parameters
+(weights and biases) are sampled from a single isotropic Gaussian with
+inverse variance equal to prior. If prior is a data
+structure of the kind generated by mlpprior, then the parameters
+are sampled from multiple Gaussians according to their groupings
+(defined by the index field) with corresponding variances
+(defined by the alpha field).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlppak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlppak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+
+
+
+Netlab Reference Manual mlppak
+
+
+
+
mlppak
+
+
+Purpose
+
+Combines weights and biases into one weights vector.
+
+
+Synopsis
+
+
+w = mlppak(net)
+
+
+
+
+Description
+
+w = mlppak(net) takes a network data structure net and
+combines the component weight matrices and bias vectors into a single row
+vector w. The facility to switch between these two
+representations for the network parameters is useful, for example, in
+training a network by error function minimization, since a single
+vector of parameters can be handled by general-purpose optimization
+routines.
+
+
+The ordering of the parameters in w is defined by
+
+
+ w = [net.w1(:)', net.b1, net.w2(:)', net.b2];
+
+
+where w1 is the first-layer weight matrix, b1 is the
+first-layer bias vector, w2 is the second-layer weight matrix,
+and b2 is the second-layer bias vector.
+
+
+prior = mlpprior(nin, nhidden, nout, aw1, ab1, aw2, ab2)
+generates a data structure
+prior, with fields prior.alpha and prior.index, which
+specifies a Gaussian prior distribution for the network weights in a
+two-layer feedforward network. Two different cases are possible. In
+the first case, aw1, ab1, aw2 and ab2 are all
+scalars and represent the regularization coefficients for four groups
+of parameters in the network corresponding to first-layer weights,
+first-layer biases, second-layer weights, and second-layer biases
+respectively. Then prior.alpha represents a column vector of
+length 4 containing the parameters, and prior.index is a matrix
+specifying which weights belong in each group. Each column has one
+element for each weight in the matrix, using the standard ordering as
+defined in mlppak, and each element is 1 or 0 according to
+whether the weight is a member of the corresponding group or not. In
+the second case the parameter aw1 is a vector of length equal to
+the number of inputs in the network, and the corresponding matrix
+prior.index now partitions the first-layer weights into groups
+corresponding to the weights fanning out of each input unit. This
+prior is appropriate for the technique of automatic relevance
+determination.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlptrain.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlptrain.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,35 @@
+
+
+
+Netlab Reference Manual mlptrain
+
+
+
+
mlptrain
+
+
+Purpose
+
+Utility to train an MLP network for demtrain
+
+
+Description
+
+
+
[net, error] = mlptrain(net, x, t, its) trains a network data
+structure net using the scaled conjugate gradient algorithm
+for its cycles with
+input data x, target data t.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/mlpunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/mlpunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual mlpunpak
+
+
+
+
mlpunpak
+
+
+Purpose
+
+Separates weights vector into weight and bias matrices.
+
+
+Synopsis
+
+
+net = mlpunpak(net, w)
+
+
+
+
+Description
+
+net = mlpunpak(net, w) takes an mlp network data structure net and
+a weight vector w, and returns a network data structure identical to
+the input network, except that the first-layer weight matrix
+w1, the first-layer bias vector b1, the second-layer
+weight matrix w2 and the second-layer bias vector b2 have all
+been set to the corresponding elements of w.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/netderiv.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/netderiv.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual netderiv
+
+
+
+
netderiv
+
+
+Purpose
+
+Evaluate derivatives of network outputs by weights generically.
+
+
+Synopsis
+
+
+g = netderiv(w, net, x)
+
+
+
+Description
+
+
+
g = netderiv(w, net, x) takes a weight vector w and a network
+data structure net, together with the matrix x of input
+vectors, and returns the
+gradient of the outputs with respect to the weights evaluated at w.
+
+
e = neterr(w, net, x, t) takes a weight vector w and a network
+data structure net, together with the matrix x of input
+vectors and the matrix t of target vectors, and returns the
+value of the error function evaluated at w.
+
+
[e, varargout] = neterr(w, net, x, t) also returns any additional
+return values from the error function.
+
+
+[y, extra] = netevfwd(w, net, x, t, x_test) takes a network data
+structure
+net together with the input x and target t training data
+and input test data x_test.
+It returns the normal forward propagation through the network y
+together with a matrix extra which consists of error bars (variance)
+for a regression problem or moderated outputs for a classification problem.
+
+
The optional argument (and return value)
+invhess is the inverse of the network Hessian
+computed on the training data inputs and targets. Passing it in avoids
+recomputing it, which can be a significant saving for large training sets.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/netgrad.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/netgrad.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual netgrad
+
+
+
+
netgrad
+
+
+Purpose
+
+Evaluate network error gradient for generic optimizers
+
+
+Synopsis
+
+
+g = netgrad(w, net, x, t)
+
+
+
+Description
+
+
+
g = netgrad(w, net, x, t) takes a weight vector w and a network
+data structure net, together with the matrix x of input
+vectors and the matrix t of target vectors, and returns the
+gradient of the error function evaluated at w.
+
+
h = nethess(w, net, x, t) takes a weight vector w and a network
+data structure net, together with the matrix x of input
+vectors and the matrix t of target vectors, and returns the
+value of the Hessian evaluated at w.
+
+
[e, varargout] = nethess(w, net, x, t, varargin) also returns any additional
+return values from the network Hessian function, and passes additional arguments
+to that function.
+
+
+Example
+
+
+
In evidence, this function is called once to compute the
+data contribution to the Hessian
+
+
+[h, dh] = nethess(w, net, x, t, dh);
+
+
+and again to update the Hessian for new values of the hyper-parameters
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/netinit.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/netinit.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+
+
+
+Netlab Reference Manual netinit
+
+
+
+
netinit
+
+
+Purpose
+
+Initialise the weights in a network.
+
+
+Synopsis
+
+
+net = netinit(net, prior)
+
+
+
+
+Description
+
+
+
net = netinit(net, prior) takes a network data structure
+net and sets the weights and biases by sampling from a Gaussian
+distribution. If prior is a scalar, then all of the parameters
+(weights and biases) are sampled from a single isotropic Gaussian with
+inverse variance equal to prior. If prior is a data
+structure of the kind generated by mlpprior, then the parameters
+are sampled from multiple Gaussians according to their groupings
+(defined by the index field) with corresponding variances
+(defined by the alpha field).
+
+
netopt is a helper function which facilitates the training of
+networks using the general purpose optimizers as well as sampling from the
+posterior distribution of parameters using general purpose Markov chain
+Monte Carlo sampling algorithms. It can be used with any function that
+searches in parameter space using error and gradient functions.
+
+
[net, options] = netopt(net, options, x, t, alg) takes a network
+data structure net, together with a vector options of
+parameters governing the behaviour of the optimization algorithm, a
+matrix x of input vectors and a matrix t of target
+vectors, and returns the trained network as well as an updated
+options vector. The string alg determines which optimization
+algorithm (conjgrad, quasinew, scg, etc.) or Monte
+Carlo algorithm (such as hmc) will be used.
+
+
[net, options, varargout] = netopt(net, options, x, t, alg)
+also returns any additional return values from the optimisation algorithm.
+
+
+Examples
+
+Suppose we create a 4-input, 3 hidden unit, 2-output feed-forward
+network using net = mlp(4, 3, 2, 'linear'). We can then train
+the network with the scaled conjugate gradient algorithm by using
+net = netopt(net, options, x, t, 'scg') where x and
+t are the input and target data matrices respectively, and the
+options vector is set appropriately for scg.
+
+
If we also wish to plot the learning curve, we can use the additional
+return value errlog given by scg:
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/netpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/netpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+
+
+
+Netlab Reference Manual netpak
+
+
+
+
netpak
+
+
+Purpose
+
+Combines weights and biases into one weights vector.
+
+
+Synopsis
+
+
+w = netpak(net)
+
+
+
+
+Description
+
+w = netpak(net) takes a network data structure net and
+combines the component weight matrices into a single row
+vector w. The facility to switch between these two
+representations for the network parameters is useful, for example, in
+training a network by error function minimization, since a single
+vector of parameters can be handled by general-purpose optimization
+routines. This function also takes into account a mask defined
+as a field in net by removing any weights that correspond to
+entries of 0 in the mask.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/netunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/netunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual netunpak
+
+
+
+
netunpak
+
+
+Purpose
+
+Separates weights vector into weight and bias matrices.
+
+
+Synopsis
+
+
+net = netunpak(net, w)
+
+
+
+
+Description
+
+net = netunpak(net, w) takes an net network data structure net and
+a weight vector w, and returns a network data structure identical to
+the input network, except that the component weight matrices have all
+been set to the corresponding elements of w. If there is
+a mask field in the net data structure, then the weights in
+w are placed in locations corresponding to non-zero entries in the
+mask (so w should have the same length as the number of non-zero
+entries in the mask).
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/olgd.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/olgd.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,101 @@
+
+
+
+Netlab Reference Manual olgd
+
+
+
+
olgd
+
+
+Purpose
+
+On-line gradient descent optimization.
+
+
+Description
+
+[net, options, errlog, pointlog] = olgd(net, options, x, t) uses
+on-line gradient descent to find a local minimum of the error function for the
+network
+net computed on the input data x and target values
+t. A log of the error values
+after each cycle is (optionally) returned in errlog, and a log
+of the points visited is (optionally) returned in pointlog.
+Because the gradient is computed on-line (i.e. after each pattern)
+this can be quite inefficient in Matlab.
+
+
The error function value at final weight vector is returned
+in options(8).
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog, and the points visited
+in the return argument pointslog. If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is the precision required for the value
+of x at the solution. If the absolute difference between
+the values of x between two successive steps is less than
+options(2), then this condition is satisfied.
+
+
options(3) is the precision required of the objective
+function at the solution. If the absolute difference between the
+error functions between two successive steps is less than
+options(3), then this condition is satisfied.
+Both this and the previous condition must be
+satisfied for termination. Note that testing the function value at each
+iteration roughly halves the speed of the algorithm.
+
+
options(5) determines whether the patterns are sampled randomly
+with replacement. If it is 0 (the default), then patterns are sampled
+in order.
+
+
options(6) determines if the learning rate decays. If it is 1
+then the learning rate decays at a rate of 1/t. If it is 0
+(the default) then the learning rate is constant.
+
+
options(9) should be set to 1 to check the user defined gradient
+function.
+
+
options(10) returns the total number of function evaluations (including
+those in any line searches).
+
+
options(11) returns the total number of gradient evaluations.
+
+
options(14) is the maximum number of iterations (passes through
+the complete pattern set); default 100.
+
+
options(17) is the momentum; default 0.5.
+
+
options(18) is the learning rate; default 0.01.
+
+
+Examples
+
+The following example performs on-line gradient descent on an MLP with
+random sampling from the pattern set.
+
+
+PCcoeff = pca(data) computes the eigenvalues of the covariance
+matrix of the dataset data and returns them as PCcoeff. These
+coefficients give the variance of data along the corresponding
+principal components.
+
+
PCcoeff = pca(data, N) returns the largest N eigenvalues.
+
+
[PCcoeff, PCvec] = pca(data) returns the principal components as
+well as the coefficients. This is considerably more computationally
+demanding than just computing the eigenvalues.
+
+
+plotmat(matrix, textcolour, gridcolour, fontsize) displays the matrix
+matrix on the current figure. The textcolour and gridcolour
+arguments control the colours of the numbers and grid labels respectively and
+should follow the usual Matlab specification.
+The parameter fontsize should be an integer.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/ppca.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/ppca.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual ppca
+
+
+
+
ppca
+
+
+Purpose
+
+Probabilistic Principal Components Analysis
+
+
+Synopsis
+
+
+[var, U, lambda] = pca(x, ppca_dim)
+
+
+
+
+Description
+
+
+[var, U, lambda] = ppca(x, ppca_dim) computes the principal component
+subspace U of dimension ppca_dim using a centred
+covariance matrix x. The variable var contains
+the off-subspace variance (which is assumed to be spherical), while the
+vector lambda contains the variances of each of the principal
+components. This is computed using the eigenvalue and eigenvector
+decomposition of x.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/quasinew.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/quasinew.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,99 @@
+
+
+
+Netlab Reference Manual quasinew
+
+
+
+
quasinew
+
+
+Purpose
+
+Quasi-Newton optimization.
+
+
+Description
+
+[x, options, flog, pointlog] = quasinew(f, x, options, gradf)
+uses a quasi-Newton
+algorithm to find a local minimum of the function f(x) whose
+gradient is given by gradf(x). Here x is a row vector
+and f returns a scalar value.
+The point at which f has a local minimum
+is returned as x. The function value at that point is returned
+in options(8). A log of the function values
+after each cycle is (optionally) returned in flog, and a log
+of the points visited is (optionally) returned in pointlog.
+
+
quasinew(f, x, options, gradf, p1, p2, ...) allows
+additional arguments to be passed to f() and gradf().
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog, and the points visited
+in the return argument pointlog. If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is a measure of the absolute precision required for the value
+of x at the solution. If the absolute difference between
+the values of x between two successive steps is less than
+options(2), then this condition is satisfied.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. If the absolute difference between the
+objective function values between two successive steps is less than
+options(3), then this condition is satisfied.
+Both this and the previous condition must be
+satisfied for termination.
+
+
options(9) should be set to 1 to check the user defined gradient
+function.
+
+
options(10) returns the total number of function evaluations (including
+those in any line searches).
+
+
options(11) returns the total number of gradient evaluations.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
options(15) is the precision in parameter space of the line search;
+default 1e-2.
+
+
+Examples
+
+An example of
+the use of the additional arguments is the minimization of an error
+function for a neural network:
+
+
+The quasi-Newton algorithm builds up an
+approximation to the inverse Hessian over a number of steps. The
+method requires order W squared storage, where W is the number of function
+parameters. The Broyden-Fletcher-Goldfarb-Shanno formula for the
+inverse Hessian updates is used. The line searches are carried out to
+a relatively low precision (1.0e-2).
+
+
+net = rbf(nin, nhidden, nout, rbfunc) constructs and initialises
+a radial basis function network returning a data structure net.
+The weights are all initialised with a zero mean, unit variance normal
+distribution, with the exception of the variances, which are set to one.
+This makes use of the Matlab function
+randn and so the seed for the random weight initialization can be
+set using randn('state', s) where s is the seed value. The
+activation functions are defined in terms of the distance between
+the data point and the corresponding centre. Note that the functions are
+computed to a convenient constant multiple: for example, the Gaussian
+is not normalised. (Normalisation is not needed as the function outputs
+are linearly combined in the next layer.)
+
+
The fields in net are
+
+
+ type = 'rbf'
+ nin = number of inputs
+ nhidden = number of hidden units
+ nout = number of outputs
+ nwts = total number of weights and biases
+ actfn = string defining hidden unit activation function:
+ 'gaussian' for a radially symmetric Gaussian function.
+ 'tps' for r^2 log r, the thin plate spline function.
+ 'r4logr' for r^4 log r.
+ outfn = string defining output error function:
+ 'linear' for linear outputs (default) and SoS error.
+ 'neuroscale' for Sammon stress measure.
+ c = centres
+ wi = squared widths (null for rlogr and tps)
+ w2 = second layer weight matrix
+ b2 = second layer bias vector
+
+
+
+
net = rbf(nin, nhidden, nout, rbfunc, outfunc) allows the user to
+specify the type of error function to be used. The field outfn
+is set to the value of this string. Linear outputs (for regression problems)
+and Neuroscale outputs (for topographic mappings) are supported.
+
+
net = rbf(nin, nhidden, nout, rbfunc, outfunc, prior, beta),
+in which prior is
+a scalar, allows the field net.alpha in the data structure
+net to be set, corresponding to a zero-mean isotropic Gaussian
+prior with inverse variance with value prior. Alternatively,
+prior can consist of a data structure with fields alpha
+and index, allowing individual Gaussian priors to be set over
+groups of weights in the network. Here alpha is a column vector
+in which each element corresponds to a separate group of weights,
+which need not be mutually exclusive. The membership of the groups is
+defined by the matrix index in which the columns correspond to
+the elements of alpha. Each column has one element for each
+weight in the matrix, in the order defined by the function
+rbfpak, and each element is 1 or 0 according to whether the
+weight is a member of the corresponding group or not. A utility
+function rbfprior is provided to help in setting up the
+prior data structure.
+
+
net = rbf(nin, nhidden, nout, func, prior, beta) also sets the
+additional field net.beta in the data structure net, where
+beta corresponds to the inverse noise variance.
+
+
+Example
+
+The following code constructs an RBF network with 1 input and output node
+and 5 hidden nodes and then propagates some data x through it.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbfbkp.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbfbkp.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+
+
+
+Netlab Reference Manual rbfbkp
+
+
+
+
rbfbkp
+
+
+Purpose
+
+Backpropagate gradient of error function for RBF network.
+
+
+Synopsis
+
+
+g = rbfbkp(net, x, z, n2, deltas)
+
+
+
+Description
+
+g = rbfbkp(net, x, z, n2, deltas) takes a network data structure
+net together with a matrix x of input vectors, a matrix
+z of hidden unit activations, a matrix n2 of the squared
+distances between centres and inputs, and a matrix deltas of the
+gradient of the error function with respect to the values of the
+output units (i.e. the summed inputs to the output units, before the
+activation function is applied). The return value is the gradient
+g of the error function with respect to the network
+weights. Each row of x corresponds to one input vector.
+
+
This function is provided so that the common backpropagation algorithm
+can be used by RBF network models to compute
+gradients for the output values (in rbfderiv) as well as standard error
+functions.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbfderiv.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbfderiv.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual rbfderiv
+
+
+
+
rbfderiv
+
+
+Purpose
+
+Evaluate derivatives of RBF network outputs with respect to weights.
+
+
+Synopsis
+
+
+g = rbfderiv(net, x)
+
+
+
+Description
+
+g = rbfderiv(net, x) takes a network data structure net
+and a matrix of input vectors x and returns a three-index matrix
+g whose i, j, k element contains the
+derivative of network output k with respect to weight or bias
+parameter j for input pattern i. The ordering of the
+weight and bias parameters is defined by rbfunpak. This
+function also takes into account any mask in the network data structure.
+
+
+e = rbferr(net, x, t) takes a network data structure net together
+with a matrix x of input
+vectors and a matrix t of target vectors, and evaluates the
+appropriate error function e depending on net.outfn.
+Each row of x corresponds to one
+input vector and each row of t contains the corresponding target vector.
+
+
[e, edata, eprior] = rbferr(net, x, t) additionally returns the
+data and prior components of the error, assuming a zero mean Gaussian
+prior on the weights with inverse variance parameters alpha and
+beta taken from the network data structure net.
+
+
+y = rbfevfwd(net, x, t, x_test) takes a network data structure
+net together with the input x and target t training data
+and input test data x_test.
+It returns the normal forward propagation through the network y
+together with a matrix extra which consists of error bars (variance)
+for a regression problem or moderated outputs for a classification problem.
+
+
The optional argument (and return value)
+invhess is the inverse of the network Hessian
+computed on the training data inputs and targets. Passing it in avoids
+recomputing it, which can be a significant saving for large training sets.
+
+
+a = rbffwd(net, x) takes a network data structure
+net and a matrix x of input
+vectors and forward propagates the inputs through the network to generate
+a matrix a of output vectors. Each row of x corresponds to one
+input vector and each row of a contains the corresponding output vector.
+The activation function that is used is determined by net.actfn.
+
+
[a, z, n2] = rbffwd(net, x) also generates a matrix z of
+the hidden unit activations where each row corresponds to one pattern.
+These hidden unit activations represent the design matrix for
+the RBF. The matrix n2 is the squared distances between each
+basis function centre and each pattern in which each row corresponds
+to a data point.
+
+
+g = rbfgrad(net, x, t) takes a network data structure net
+together with a matrix x of input
+vectors and a matrix t of target vectors, and evaluates the gradient
+g of the error function with respect to the network weights (i.e.
+including the hidden unit parameters). The error
+function is sum of squares.
+Each row of x corresponds to one
+input vector and each row of t contains the corresponding target vector.
+If the output function is 'neuroscale' then the gradient is only
+computed for the output layer weights and biases.
+
+
[g, gdata, gprior] = rbfgrad(net, x, t) also returns separately
+the data and prior contributions to the gradient. In the case of
+multiple groups in the prior, gprior is a matrix with a row
+for each group and a column for each weight parameter.
+
+
+h = rbfhess(net, x, t) takes an RBF network data structure net,
+a matrix x of input values, and a matrix t of target
+values and returns the full Hessian matrix h corresponding to
+the second derivatives of the negative log posterior distribution,
+evaluated for the current weight and bias values as defined by
+net. Currently, the implementation only computes the
+Hessian for the output layer weights.
+
+
[h, hdata] = rbfhess(net, x, t) returns both the Hessian matrix
+h and the contribution hdata arising from the data dependent
+term in the Hessian.
+
+
h = rbfhess(net, x, t, hdata) takes a network data structure
+net, a matrix x of input values, and a matrix t of
+target values, together with the contribution hdata arising from
+the data dependent term in the Hessian, and returns the full Hessian
+matrix h corresponding to the second derivatives of the negative
+log posterior distribution. This version saves computation time if
+hdata has already been evaluated for the current weight and bias
+values.
+
+
+Example
+
+For the standard regression framework with a Gaussian conditional
+distribution of target values given input values, and a simple
+Gaussian prior over weights, the Hessian takes the form
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbfjacob.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbfjacob.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+
+
+
+Netlab Reference Manual rbfjacob
+
+
+
+
rbfjacob
+
+
+Purpose
+
+Evaluate derivatives of RBF network outputs with respect to inputs.
+
+
+Synopsis
+
+
+g = rbfjacob(net, x)
+
+
+
+Description
+
+g = rbfjacob(net, x) takes a network data structure net
+and a matrix of input vectors x and returns a three-index matrix
+g whose i, j, k element contains the
+derivative of network output k with respect to input
+parameter j for input pattern i.
+
+
+[mask, prior] = rbfprior(rbfunc, nin, nhidden, nout, aw2, ab2)
+generates a vector
+mask that selects only the output
+layer weights. This is because most uses of RBF networks in a Bayesian
+context have fixed basis functions with the output layer as the only
+adjustable parameters. In particular, the Neuroscale output error function
+is designed to work only with this mask.
+
+
The return value
+prior is a data structure,
+with fields prior.alpha and prior.index, which
+specifies a Gaussian prior distribution for the network weights in an
+RBF network. The parameters aw2 and ab2 are all
+scalars and represent the regularization coefficients for two groups
+of parameters in the network corresponding to
+ second-layer weights, and second-layer biases
+respectively. Then prior.alpha represents a column vector of
+length 2 containing the parameters, and prior.index is a matrix
+specifying which weights belong in each group. Each column has one
+element for each weight in the matrix, using the standard ordering as
+defined in rbfpak, and each element is 1 or 0 according to
+whether the weight is a member of the corresponding group or not.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbfsetbf.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbfsetbf.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+
+
+
+Netlab Reference Manual rbfsetbf
+
+
+
+
rbfsetbf
+
+
+Purpose
+
+Set basis functions of RBF from data.
+
+
+Synopsis
+
+
+net = rbfsetbf(net, options, x)
+
+
+
+
+Description
+
+net = rbfsetbf(net, options, x) sets the basis functions of the
+RBF network net so that they model the unconditional density of the
+dataset x. This is done by training a GMM with spherical covariances
+using gmmem. The options vector is passed to gmmem.
+The widths of the functions are set by a call to rbfsetfw.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbfsetfw.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbfsetfw.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,46 @@
+
+
+
+Netlab Reference Manual rbfsetfw
+
+
+
+
rbfsetfw
+
+
+Purpose
+
+Set basis function widths of RBF.
+
+
+Synopsis
+
+
+net = rbfsetfw(net, scale)
+
+
+
+
+Description
+
+net = rbfsetfw(net, scale) sets the widths of
+the basis functions of the
+RBF network net.
+If Gaussian basis functions are used, then the variances are set to
+the largest squared distance between centres if scale is non-positive
+and scale times the mean distance of each centre to its nearest
+neighbour if scale is positive. Non-Gaussian basis functions do
+not have a width.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbftrain.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbftrain.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,90 @@
+
+
+
+Netlab Reference Manual rbftrain
+
+
+
+
rbftrain
+
+
+Purpose
+
+Two stage training of RBF network.
+
+
+Description
+
+net = rbftrain(net, options, x, t) uses a
+two stage training
+algorithm to set the weights in the RBF model structure net.
+Each row of x corresponds to one
+input vector and each row of t contains the corresponding target vector.
+The centres are determined by fitting a Gaussian mixture model
+with circular covariances using the EM algorithm through a call to
+rbfsetbf. (The mixture model is
+initialised using a small number of iterations of the K-means algorithm.)
+If the activation functions are Gaussians, then the basis function widths
+are then set to the maximum inter-centre squared distance.
+
+
For linear outputs,
+the hidden to output
+weights that give rise to the least squares solution
+can then be determined using the pseudo-inverse. For neuroscale outputs,
+the hidden to output weights are determined using the iterative shadow
+targets algorithm.
+ Although this two stage
+procedure may not give solutions with as low an error as using general
+purpose non-linear optimisers, it is much faster.
+
+
The options vector may have two rows: if this is the case, then the second row
+is passed to rbfsetbf, which allows the user to specify a different
+number of iterations for RBF and GMM training.
+The optional parameters to rbftrain have the following interpretations.
+
+
options(1) is set to 1 to display error values during EM training.
+
+
options(2) is a measure of the precision required for the value
+of the weights w at the solution.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. Both this and the previous condition must be
+satisfied for termination.
+
+
options(5) is set to 1 if the basis functions parameters should remain
+unchanged; default 0.
+
+
+options(6) is set to 1 if the output layer weights should be
+set using PCA. This is only relevant for Neuroscale outputs; default 0.
+
+
options(14) is the maximum number of iterations for the shadow
+targets algorithm;
+default 100.
+
+
+Example
+
+The following example creates an RBF network and then trains it:
+
+
+net = rbf(1, 4, 1, 'gaussian');
+options(1, :) = foptions;
+options(2, :) = foptions;
+options(2, 14) = 10; % 10 iterations of EM
+options(2, 5) = 1; % Check for covariance collapse in EM
+net = rbftrain(net, options, x, t);
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/rbfunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/rbfunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual rbfunpak
+
+
+
+
rbfunpak
+
+
+Purpose
+
+Separates a vector of RBF weights into its components.
+
+
+Synopsis
+
+
+net = rbfunpak(net, w)
+
+
+
+
+Description
+
+net = rbfunpak(net, w) takes an RBF network data structure net and
+a weight vector w, and returns a network data structure identical to
+the input network, except that the centres
+c, the widths wi, the second-layer
+weight matrix w2 and the second-layer bias vector b2 have all
+been set to the corresponding elements of w.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/scg.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/scg.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,97 @@
+
+
+
+Netlab Reference Manual scg
+
+
+
+
scg
+
+
+Purpose
+
+Scaled conjugate gradient optimization.
+
+
+Description
+
+[x, options] = scg(f, x, options, gradf) uses a scaled conjugate
+gradients
+algorithm to find a local minimum of the function f(x) whose
+gradient is given by gradf(x). Here x is a row vector
+and f returns a scalar value.
+The point at which f has a local minimum
+is returned as x. The function value at that point is returned
+in options(8).
+
+
[x, options, flog, pointlog, scalelog] = scg(f, x, options, gradf)
+also returns (optionally) a log of the function values
+after each cycle in flog, a log
+of the points visited in pointlog, and a log of the scale values
+in the algorithm in scalelog.
+
+
scg(f, x, options, gradf, p1, p2, ...) allows
+additional arguments to be passed to f() and gradf().
+
+The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs error
+values in the return argument errlog, and the points visited
+in the return argument pointslog. If options(1) is set to 0,
+then only warning messages are displayed. If options(1) is -1,
+then nothing is displayed.
+
+
options(2) is a measure of the absolute precision required for the value
+of x at the solution. If the absolute difference between
+the values of x between two successive steps is less than
+options(2), then this condition is satisfied.
+
+
options(3) is a measure of the precision required of the objective
+function at the solution. If the absolute difference between the
+objective function values between two successive steps is less than
+options(3), then this condition is satisfied.
+Both this and the previous condition must be
+satisfied for termination.
+
+
options(9) is set to 1 to check the user defined gradient function.
+
+
options(10) returns the total number of function evaluations (including
+those in any line searches).
+
+
options(11) returns the total number of gradient evaluations.
+
+
options(14) is the maximum number of iterations; default 100.
+
+
+Examples
+
+An example of
+the use of the additional arguments is the minimization of an error
+function for a neural network:
+
+The search direction is re-started after every nparams
+successful weight updates where nparams is the total number of
+parameters in x. The algorithm is based on that given by Williams
+(1991), with a simplified procedure for updating lambda when
+rho < 0.25.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/som.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/som.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,58 @@
+
+
+
+Netlab Reference Manual som
+
+
+
+
som
+
+
+Purpose
+
+Creates a Self-Organising Map.
+
+
+Synopsis
+
+
+
+net = som(nin, map_size)
+
+
+
+
+Description
+
+net = som(nin, map_size) creates a SOM net
+with input dimension (i.e. data dimension) nin and map dimensions
+map_size. Only two-dimensional maps are currently implemented.
+
+
The fields in net are
+
+
+ type = 'som'
+ nin = number of inputs
+ map_dim = dimension of map (constrained to be 2)
+ map_size = grid size: number of nodes in each dimension
+ num_nodes = number of nodes: the product of values in map_size
+ map = map_dim+1 dimensional array containing nodes
+ inode_dist = map of inter-node distances using Manhattan metric
+
+
+
+
The map contains the node vectors arranged column-wise in the first
+dimension of the array.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/somfwd.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/somfwd.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+
+
+
+Netlab Reference Manual somfwd
+
+
+
+
somfwd
+
+
+Purpose
+
+Forward propagation through a Self-Organising Map.
+
+
+Synopsis
+
+
+
+d2 = somfwd(net, x)
+
+
+
+
+Description
+
+d2 = somfwd(net, x) propagates the data matrix x through
+ a SOM net, returning the squared distance matrix d2 with
+dimension nin by num_nodes. The $i$th row represents the
+squared Euclidean distance to each of the nodes of the SOM.
+
+
[d2, win_nodes] = somfwd(net, x) also returns the indices of the
+winning nodes for each pattern.
+
+
+Example
+
+
+
The following code fragment creates a SOM with a $5times 5$ map for an
+8-dimensional data space. It then applies the test data to the map.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/sompak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/sompak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual sompak
+
+
+
+
sompak
+
+
+Purpose
+
+Combines node weights into one weights matrix.
+
+
+Synopsis
+
+
+c = sompak(net)
+
+
+
+
+Description
+
+c = sompak(net) takes a SOM data structure net and
+combines the node weights into a matrix of centres
+c where each row represents the node vector.
+
+
The ordering of the parameters in w is defined by the indexing of the
+multi-dimensional array net.map.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/somtrain.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/somtrain.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+
+
+
+Netlab Reference Manual somtrain
+
+
+
+
somtrain
+
+
+Purpose
+
+Kohonen training algorithm for SOM.
+
+
+Synopsis
+
+
+
+net = somtrain(net, options, x)
+
+
+
+
+Description
+
+net = somtrain(net, options, x) uses Kohonen's algorithm to
+train a SOM. Both on-line and batch algorithms are implemented.
+The learning rate (for on-line) and neighbourhood size decay linearly.
+There is no error function minimised during training (so there is
+no termination criterion other than the number of epochs), but the
+sum-of-squares is computed and returned in options(8).
+
+
The optional parameters have the following interpretations.
+
+
options(1) is set to 1 to display error values; also logs learning
+rate alpha and neighbourhood size nsize.
+Otherwise nothing is displayed.
+
+
options(5) determines whether the patterns are sampled randomly
+with replacement. If it is 0 (the default), then patterns are sampled
+in order. This is only relevant to the on-line algorithm.
+
+
options(6) determines if the on-line or batch algorithm is
+used. If it is 1
+then the batch algorithm is used. If it is 0
+(the default) then the on-line algorithm is used.
+
+
options(14) is the maximum number of iterations (passes through
+the complete pattern set); default 100.
+
+
options(15) is the final neighbourhood size; default value is the
+same as the initial neighbourhood size.
+
+
options(16) is the final learning rate; default value is the same
+as the initial learning rate.
+
+
options(17) is the initial neighbourhood size; default 0.5*maximum
+map size.
+
+
options(18) is the initial learning rate; default 0.9. This parameter
+must be positive.
+
+
+Examples
+
+The following example performs on-line training on a SOM in two stages:
+ordering and convergence.
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/nethelp3.3/somunpak.htm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/nethelp3.3/somunpak.htm Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+
+
+
+Netlab Reference Manual somunpak
+
+
+
+
somunpak
+
+
+Purpose
+
+Replaces node weights in SOM.
+
+
+Synopsis
+
+
+net = somunpak(net, w)
+
+
+
+
+Description
+
+net = somunpak(net, w) takes a SOM data structure net and
+weight matrix w (each node represented by a row) and
+puts the nodes back into the multi-dimensional array net.map.
+
+
The ordering of the parameters in w is defined by the indexing of the
+multi-dimensional array net.map.
+
+
Copyright (c) Ian T Nabney (1996-9)
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,180 @@
+/Contents.m/1.1.1.1/Wed Apr 27 17:59:04 2005//
+/LICENSE/1.1.1.1/Wed Apr 27 17:59:04 2005//
+/conffig.m/1.1.1.1/Wed Apr 27 17:59:04 2005//
+/confmat.m/1.1.1.1/Wed Apr 27 17:59:04 2005//
+/conjgrad.m/1.1.1.1/Wed Apr 27 17:59:04 2005//
+/consist.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/convertoldnet.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/datread.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/datwrite.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/dem2ddat.m/1.1.1.1/Tue May 10 01:32:18 2005//
+/demard.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demev1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demev2.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demev3.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgauss.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demglm1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demglm2.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgmm1.m/1.1.1.1/Sun May 8 17:51:14 2005//
+/demgmm2.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgmm3.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgmm4.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgmm5.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgp.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgpard.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgpot.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgtm1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demgtm2.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demhint.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demhmc1.m/1.1.1.1/Tue May 24 00:03:34 2005//
+/demhmc2.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demhmc3.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demkmn1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demknn1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demmdn1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demmet1.m/1.1.1.1/Mon May 23 06:23:08 2005//
+/demmlp1.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demmlp2.m/1.1.1.1/Wed Apr 27 17:59:06 2005//
+/demnlab.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demns1.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demolgd1.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demopt1.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/dempot.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demprgp.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demprior.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demrbf1.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demsom1.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/demtrain.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/dist2.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/eigdec.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/errbayes.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/evidence.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/fevbayes.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gauss.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gbayes.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glm.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmderiv.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmerr.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmevfwd.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmfwd.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmgrad.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmhess.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glminit.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmpak.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmtrain.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/glmunpak.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gmm.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gmmactiv.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gmmem.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gmminit.m/1.1.1.1/Mon May 23 23:44:46 2005//
+/gmmpak.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gmmpost.m/1.1.1.1/Wed Apr 27 17:59:08 2005//
+/gmmprob.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gmmsamp.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gmmunpak.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gp.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpcovar.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpcovarf.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpcovarp.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gperr.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpfwd.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpgrad.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpinit.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gppak.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gpunpak.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gradchek.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/graddesc.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gsamp.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtm.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmem.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmfwd.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtminit.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmlmean.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmlmode.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmmag.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmpost.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/gtmprob.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/hbayes.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/hesschek.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/hintmat.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/hinton.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/histp.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/hmc.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/kmeansNetlab.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/knn.m/1.1.1.1/Wed Apr 27 17:59:10 2005//
+/knnfwd.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/linef.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/linemin.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/maxitmess.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdn.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdn2gmm.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdndist2.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnerr.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnfwd.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdngrad.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdninit.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnnet.mat/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnpak.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnpost.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnprob.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mdnunpak.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/metrop.m/1.1.1.1/Mon May 23 23:07:40 2005//
+/minbrack.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlp.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpbkp.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpderiv.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlperr.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpevfwd.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpfwd.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpgrad.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlphdotv.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlphess.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlphint.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpinit.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlppak.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpprior.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlptrain.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/mlpunpak.m/1.1.1.1/Wed Apr 27 17:59:12 2005//
+/netderiv.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/neterr.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netevfwd.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netgrad.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/nethess.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netinit.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netlab3.3.zip/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netlogo.mat/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netopt.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netpak.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/netunpak.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/oilTrn.dat/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/oilTst.dat/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/olgd.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/pca.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/plotmat.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/ppca.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/quasinew.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbf.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfbkp.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfderiv.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbferr.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfevfwd.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbffwd.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfgrad.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfhess.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfjacob.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfpak.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfprior.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfsetbf.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbfsetfw.m/1.1.1.1/Wed Apr 27 17:59:14 2005//
+/rbftrain.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/rbfunpak.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/rosegrad.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/rosen.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/scg.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/som.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/somfwd.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/sompak.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/somtrain.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/somunpak.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/xor.dat/1.1.1.1/Wed Apr 27 17:59:16 2005//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/netlab3.3
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/Contents.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/Contents.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,176 @@
+% Netlab Toolbox
+% Version 3.3.1 18-Jun-2004
+%
+% conffig - Display a confusion matrix.
+% confmat - Compute a confusion matrix.
+% conjgrad - Conjugate gradients optimization.
+% consist - Check that arguments are consistent.
+% convertoldnet- Convert pre-2.3 release MLP and MDN nets to new format
+% datread - Read data from an ascii file.
+% datwrite - Write data to ascii file.
+% dem2ddat - Generates two dimensional data for demos.
+% demard - Automatic relevance determination using the MLP.
+% demev1 - Demonstrate Bayesian regression for the MLP.
+% demev2 - Demonstrate Bayesian classification for the MLP.
+% demev3 - Demonstrate Bayesian regression for the RBF.
+% demgauss - Demonstrate sampling from Gaussian distributions.
+% demglm1 - Demonstrate simple classification using a generalized linear model.
+% demglm2 - Demonstrate simple classification using a generalized linear model.
+% demgmm1 - Demonstrate density modelling with a Gaussian mixture model.
+% demgmm3 - Demonstrate density modelling with a Gaussian mixture model.
+% demgmm4 - Demonstrate density modelling with a Gaussian mixture model.
+% demgmm5 - Demonstrate density modelling with a PPCA mixture model.
+% demgp - Demonstrate simple regression using a Gaussian Process.
+% demgpard - Demonstrate ARD using a Gaussian Process.
+% demgpot - Computes the gradient of the negative log likelihood for a mixture model.
+% demgtm1 - Demonstrate EM for GTM.
+% demgtm2 - Demonstrate GTM for visualisation.
+% demhint - Demonstration of Hinton diagram for 2-layer feed-forward network.
+% demhmc1 - Demonstrate Hybrid Monte Carlo sampling on mixture of two Gaussians.
+% demhmc2 - Demonstrate Bayesian regression with Hybrid Monte Carlo sampling.
+% demhmc3 - Demonstrate Bayesian regression with Hybrid Monte Carlo sampling.
+% demkmean - Demonstrate simple clustering model trained with K-means.
+% demknn1 - Demonstrate nearest neighbour classifier.
+% demmdn1 - Demonstrate fitting a multi-valued function using a Mixture Density Network.
+% demmet1 - Demonstrate Markov Chain Monte Carlo sampling on a Gaussian.
+% demmlp1 - Demonstrate simple regression using a multi-layer perceptron
+% demmlp2 - Demonstrate simple classification using a multi-layer perceptron
+% demnlab - A front-end Graphical User Interface to the demos
+% demns1 - Demonstrate Neuroscale for visualisation.
+% demolgd1 - Demonstrate simple MLP optimisation with on-line gradient descent
+% demopt1 - Demonstrate different optimisers on Rosenbrock's function.
+% dempot - Computes the negative log likelihood for a mixture model.
+% demprgp - Demonstrate sampling from a Gaussian Process prior.
+% demprior - Demonstrate sampling from a multi-parameter Gaussian prior.
+% demrbf1 - Demonstrate simple regression using a radial basis function network.
+% demsom1 - Demonstrate SOM for visualisation.
+% demtrain - Demonstrate training of MLP network.
+% dist2 - Calculates squared distance between two sets of points.
+% eigdec - Sorted eigendecomposition
+% errbayes - Evaluate Bayesian error function for network.
+% evidence - Re-estimate hyperparameters using evidence approximation.
+% fevbayes - Evaluate Bayesian regularisation for network forward propagation.
+% gauss - Evaluate a Gaussian distribution.
+% gbayes - Evaluate gradient of Bayesian error function for network.
+% glm - Create a generalized linear model.
+% glmderiv - Evaluate derivatives of GLM outputs with respect to weights.
+% glmerr - Evaluate error function for generalized linear model.
+% glmevfwd - Forward propagation with evidence for GLM
+% glmfwd - Forward propagation through generalized linear model.
+% glmgrad - Evaluate gradient of error function for generalized linear model.
+% glmhess - Evaluate the Hessian matrix for a generalised linear model.
+% glminit - Initialise the weights in a generalized linear model.
+% glmpak - Combines weights and biases into one weights vector.
+% glmtrain - Specialised training of generalized linear model
+% glmunpak - Separates weights vector into weight and bias matrices.
+% gmm - Creates a Gaussian mixture model with specified architecture.
+% gmmactiv - Computes the activations of a Gaussian mixture model.
+% gmmem - EM algorithm for Gaussian mixture model.
+% gmminit - Initialises Gaussian mixture model from data
+% gmmpak - Combines all the parameters in a Gaussian mixture model into one vector.
+% gmmpost - Computes the class posterior probabilities of a Gaussian mixture model.
+% gmmprob - Computes the data probability for a Gaussian mixture model.
+% gmmsamp - Sample from a Gaussian mixture distribution.
+% gmmunpak - Separates a vector of Gaussian mixture model parameters into its components.
+% gp - Create a Gaussian Process.
+% gpcovar - Calculate the covariance for a Gaussian Process.
+% gpcovarf - Calculate the covariance function for a Gaussian Process.
+% gpcovarp - Calculate the prior covariance for a Gaussian Process.
+% gperr - Evaluate error function for Gaussian Process.
+% gpfwd - Forward propagation through Gaussian Process.
+% gpgrad - Evaluate error gradient for Gaussian Process.
+% gpinit - Initialise Gaussian Process model.
+% gppak - Combines GP hyperparameters into one vector.
+% gpunpak - Separates hyperparameter vector into components.
+% gradchek - Checks a user-defined gradient function using finite differences.
+% graddesc - Gradient descent optimization.
+% gsamp - Sample from a Gaussian distribution.
+% gtm - Create a Generative Topographic Map.
+% gtmem - EM algorithm for Generative Topographic Mapping.
+% gtmfwd - Forward propagation through GTM.
+% gtminit - Initialise the weights and latent sample in a GTM.
+% gtmlmean - Mean responsibility for data in a GTM.
+% gtmlmode - Mode responsibility for data in a GTM.
+% gtmmag - Magnification factors for a GTM
+% gtmpost - Latent space responsibility for data in a GTM.
+% gtmprob - Probability for data under a GTM.
+% hbayes - Evaluate Hessian of Bayesian error function for network.
+% hesschek - Use central differences to confirm correct evaluation of Hessian matrix.
+% hintmat - Evaluates the coordinates of the patches for a Hinton diagram.
+% hinton - Plot Hinton diagram for a weight matrix.
+% histp - Histogram estimate of 1-dimensional probability distribution.
+% hmc - Hybrid Monte Carlo sampling.
+% kmeans - Trains a k means cluster model.
+% knn - Creates a K-nearest-neighbour classifier.
+% knnfwd - Forward propagation through a K-nearest-neighbour classifier.
+% linef - Calculate function value along a line.
+% linemin - One dimensional minimization.
+% maxitmess- Create a standard error message when training reaches max. iterations.
+% mdn - Creates a Mixture Density Network with specified architecture.
+% mdn2gmm - Converts an MDN mixture data structure to array of GMMs.
+% mdndist2 - Calculates squared distance between centres of Gaussian kernels and data
+% mdnerr - Evaluate error function for Mixture Density Network.
+% mdnfwd - Forward propagation through Mixture Density Network.
+% mdngrad - Evaluate gradient of error function for Mixture Density Network.
+% mdninit - Initialise the weights in a Mixture Density Network.
+% mdnpak - Combines weights and biases into one weights vector.
+% mdnpost - Computes the posterior probability for each MDN mixture component.
+% mdnprob - Computes the data probability likelihood for an MDN mixture structure.
+% mdnunpak - Separates weights vector into weight and bias matrices.
+% metrop - Markov Chain Monte Carlo sampling with Metropolis algorithm.
+% minbrack - Bracket a minimum of a function of one variable.
+% mlp - Create a 2-layer feedforward network.
+% mlpbkp - Backpropagate gradient of error function for 2-layer network.
+% mlpderiv - Evaluate derivatives of network outputs with respect to weights.
+% mlperr - Evaluate error function for 2-layer network.
+% mlpevfwd - Forward propagation with evidence for MLP
+% mlpfwd - Forward propagation through 2-layer network.
+% mlpgrad - Evaluate gradient of error function for 2-layer network.
+% mlphdotv - Evaluate the product of the data Hessian with a vector.
+% mlphess - Evaluate the Hessian matrix for a multi-layer perceptron network.
+% mlphint - Plot Hinton diagram for 2-layer feed-forward network.
+% mlpinit - Initialise the weights in a 2-layer feedforward network.
+% mlppak - Combines weights and biases into one weights vector.
+% mlpprior - Create Gaussian prior for mlp.
+% mlptrain - Utility to train an MLP network for demtrain
+% mlpunpak - Separates weights vector into weight and bias matrices.
+% netderiv - Evaluate derivatives of network outputs by weights generically.
+% neterr - Evaluate network error function for generic optimizers
+% netevfwd - Generic forward propagation with evidence for network
+% netgrad - Evaluate network error gradient for generic optimizers
+% nethess - Evaluate network Hessian
+% netinit - Initialise the weights in a network.
+% netopt - Optimize the weights in a network model.
+% netpak - Combines weights and biases into one weights vector.
+% netunpak - Separates weights vector into weight and bias matrices.
+% olgd - On-line gradient descent optimization.
+% pca - Principal Components Analysis
+% plotmat - Display a matrix.
+% ppca - Probabilistic Principal Components Analysis
+% quasinew - Quasi-Newton optimization.
+% rbf - Creates an RBF network with specified architecture
+% rbfbkp - Backpropagate gradient of error function for RBF network.
+% rbfderiv - Evaluate derivatives of RBF network outputs with respect to weights.
+% rbferr - Evaluate error function for RBF network.
+% rbfevfwd - Forward propagation with evidence for RBF
+% rbffwd - Forward propagation through RBF network with linear outputs.
+% rbfgrad - Evaluate gradient of error function for RBF network.
+% rbfhess - Evaluate the Hessian matrix for RBF network.
+% rbfjacob - Evaluate derivatives of RBF network outputs with respect to inputs.
+% rbfpak - Combines all the parameters in an RBF network into one weights vector.
+% rbfprior - Create Gaussian prior and output layer mask for RBF.
+% rbfsetbf - Set basis functions of RBF from data.
+% rbfsetfw - Set basis function widths of RBF.
+% rbftrain - Two stage training of RBF network.
+% rbfunpak - Separates a vector of RBF weights into its components.
+% rosegrad - Calculate gradient of Rosenbrock's function.
+% rosen - Calculate Rosenbrock's function.
+% scg - Scaled conjugate gradient optimization.
+% som - Creates a Self-Organising Map.
+% somfwd - Forward propagation through a Self-Organising Map.
+% sompak - Combines node weights into one weights matrix.
+% somtrain - Kohonen training algorithm for SOM.
+% somunpak - Replaces node weights in SOM.
+%
+% Copyright (c) Ian T Nabney (1996-2001)
+%
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/LICENSE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/LICENSE Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+ Copyright (c) 1996-2001, Ian T. Nabney
+ All rights reserved.
+
+ Redistribution and use in source and binary
+ forms, with or without modification, are
+ permitted provided that the following
+ conditions are met:
+
+ * Redistributions of source code must
+ retain the above copyright notice, this
+ list of conditions and the following
+ disclaimer.
+ * Redistributions in binary form must
+ reproduce the above copyright notice,
+ this list of conditions and the
+ following disclaimer in the
+ documentation and/or other materials
+ provided with the distribution.
+ * Neither the name of the Aston University, Birmingham, U.K.
+ nor the names of its contributors may be
+ used to endorse or promote products
+ derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+ HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+ NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/conffig.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/conffig.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function fh=conffig(y, t)
+%CONFFIG Display a confusion matrix.
+%
+% Description
+% CONFFIG(Y, T) displays the confusion matrix and classification
+% performance for the predictions Y compared with the targets T.
+% The data is assumed to be in a 1-of-N encoding, unless there is just
+% one column, when it is assumed to be a 2 class problem with a 0-1
+% encoding. Each row of Y and T corresponds to a single example.
+%
+% In the confusion matrix, the rows represent the true classes and the
+% columns the predicted classes.
+%
+% FH = CONFFIG(Y, T) also returns the figure handle FH which can be
+% used, for instance, to delete the figure when it is no longer needed.
+%
+% See also
+% CONFMAT, DEMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[C, rate] = confmat(y, t);
+
+fh = figure('Name', 'Confusion matrix', ...
+ 'NumberTitle', 'off');
+
+plotmat(C, 'k', 'k', 14);
+title(['Classification rate: ' num2str(rate(1)) '%'], 'FontSize', 14);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/confmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/confmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,56 @@
+function [C,rate]=confmat(Y,T)
+%CONFMAT Compute a confusion matrix.
+%
+% Description
+% [C, RATE] = CONFMAT(Y, T) computes the confusion matrix C and
+% classification performance RATE for the predictions Y compared
+% with the targets T. The data is assumed to be in a 1-of-N encoding,
+% unless there is just one column, when it is assumed to be a 2 class
+% problem with a 0-1 encoding. Each row of Y and T corresponds to a
+% single example.
+%
+% In the confusion matrix, the rows represent the true classes and the
+% columns the predicted classes. The vector RATE has two entries: the
+% percentage of correct classifications and the total number of correct
+% classifications.
+%
+% See also
+% CONFFIG, DEMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[n c]=size(Y);
+[n2 c2]=size(T);
+
+if n~=n2 | c~=c2
+ error('Outputs and targets are different sizes')
+end
+
+if c > 1
+ % Find the winning class assuming 1-of-N encoding
+ [maximum Yclass] = max(Y', [], 1);
+
+ TL=[1:c]*T';
+else
+ % Assume two classes with 0-1 encoding
+ c = 2;
+ class2 = find(T > 0.5);
+ TL = ones(n, 1);
+ TL(class2) = 2;
+ class2 = find(Y > 0.5);
+ Yclass = ones(n, 1);
+ Yclass(class2) = 2;
+end
+
+% Compute
+correct = (Yclass==TL);
+total=sum(sum(correct));
+rate=[total*100/n total];
+
+C=zeros(c,c);
+for i=1:c
+ for j=1:c
+ C(i,j) = sum((Yclass==j).*(TL==i));
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/conjgrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/conjgrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,168 @@
+function [x, options, flog, pointlog] = conjgrad(f, x, options, gradf, ...
+ varargin)
+%CONJGRAD Conjugate gradients optimization.
+%
+% Description
+% [X, OPTIONS, FLOG, POINTLOG] = CONJGRAD(F, X, OPTIONS, GRADF) uses a
+% conjugate gradients algorithm to find the minimum of the function
+% F(X) whose gradient is given by GRADF(X). Here X is a row vector and
+% F returns a scalar value. The point at which F has a local minimum
+% is returned as X. The function value at that point is returned in
+% OPTIONS(8). A log of the function values after each cycle is
+% (optionally) returned in FLOG, and a log of the points visited is
+% (optionally) returned in POINTLOG.
+%
+% CONJGRAD(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional
+% arguments to be passed to F() and GRADF().
+%
+% The optional parameters have the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument FLOG, and the points visited in the
+% return argument POINTLOG. If OPTIONS(1) is set to 0, then only
+% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
+% displayed.
+%
+% OPTIONS(2) is a measure of the absolute precision required for the
+% value of X at the solution. If the absolute difference between the
+% values of X between two successive steps is less than OPTIONS(2),
+% then this condition is satisfied.
+%
+% OPTIONS(3) is a measure of the precision required of the objective
+% function at the solution. If the absolute difference between the
+% objective function values between two successive steps is less than
+% OPTIONS(3), then this condition is satisfied. Both this and the
+% previous condition must be satisfied for termination.
+%
+% OPTIONS(9) is set to 1 to check the user defined gradient function.
+%
+% OPTIONS(10) returns the total number of function evaluations
+% (including those in any line searches).
+%
+% OPTIONS(11) returns the total number of gradient evaluations.
+%
+% OPTIONS(14) is the maximum number of iterations; default 100.
+%
+% OPTIONS(15) is the precision in parameter space of the line search;
+% default 1E-4.
+%
+% See also
+% GRADDESC, LINEMIN, MINBRACK, QUASINEW, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set up the options.
+if length(options) < 18
+ error('Options vector too short')
+end
+
+if(options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+% Set up options for line search
+line_options = foptions;
+% Need a precise line search for success
+if options(15) > 0
+ line_options(2) = options(15);
+else
+ line_options(2) = 1e-4;
+end
+
+display = options(1);
+
+% Next two lines allow conjgrad to work with expression strings
+f = fcnchk(f, length(varargin));
+gradf = fcnchk(gradf, length(varargin));
+
+% Check gradients
+if (options(9))
+ feval('gradchek', x, f, gradf, varargin{:});
+end
+
+options(10) = 0;
+options(11) = 0;
+nparams = length(x);
+fnew = feval(f, x, varargin{:});
+options(10) = options(10) + 1;
+gradnew = feval(gradf, x, varargin{:});
+options(11) = options(11) + 1;
+d = -gradnew; % Initial search direction
+br_min = 0;
+br_max = 1.0; % Initial value for maximum distance to search along
+tol = sqrt(eps);
+
+j = 1;
+if nargout >= 3
+ flog(j, :) = fnew;
+ if nargout == 4
+ pointlog(j, :) = x;
+ end
+end
+
+while (j <= niters)
+
+ xold = x;
+ fold = fnew;
+ gradold = gradnew;
+
+ gg = gradold*gradold';
+ if (gg == 0.0)
+ % If the gradient is zero then we are done.
+ options(8) = fnew;
+ return;
+ end
+
+ % This shouldn't occur, but rest of code depends on d being downhill
+ if (gradnew*d' > 0)
+ d = -d;
+ if options(1) >= 0
+ warning('search direction uphill in conjgrad');
+ end
+ end
+
+ line_sd = d./norm(d);
+ [lmin, line_options] = feval('linemin', f, xold, line_sd, fold, ...
+ line_options, varargin{:});
+ options(10) = options(10) + line_options(10);
+ options(11) = options(11) + line_options(11);
+ % Set x and fnew to be the actual search point we have found
+ x = xold + lmin * line_sd;
+ fnew = line_options(8);
+
+ % Check for termination
+ if (max(abs(x - xold)) < options(2) & max(abs(fnew - fold)) < options(3))
+ options(8) = fnew;
+ return;
+ end
+
+ gradnew = feval(gradf, x, varargin{:});
+ options(11) = options(11) + 1;
+
+ % Use Polak-Ribiere formula to update search direction
+ gamma = ((gradnew - gradold)*(gradnew)')/gg;
+ d = (d .* gamma) - gradnew;
+
+ if (display > 0)
+ fprintf(1, 'Cycle %4d Function %11.6f\n', j, line_options(8));
+ end
+
+ j = j + 1;
+ if nargout >= 3
+ flog(j, :) = fnew;
+ if nargout == 4
+ pointlog(j, :) = x;
+ end
+ end
+end
+
+% If we get here, then we haven't terminated in the given number of
+% iterations.
+
+options(8) = fold;
+if (options(1) >= 0)
+ disp(maxitmess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/consist.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/consist.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,87 @@
+function errstring = consist(model, type, inputs, outputs)
+%CONSIST Check that arguments are consistent.
+%
+% Description
+%
+% ERRSTRING = CONSIST(NET, TYPE, INPUTS) takes a network data structure
+% NET together with a string TYPE containing the correct network type,
+% a matrix INPUTS of input vectors and checks that the data structure
+% is consistent with the other arguments. An empty string is returned
+% if there is no error, otherwise the string contains the relevant
+% error message. If the TYPE string is empty, then any type of network
+% is allowed.
+%
+% ERRSTRING = CONSIST(NET, TYPE) takes a network data structure NET
+% together with a string TYPE containing the correct network type, and
+% checks that the two types match.
+%
+% ERRSTRING = CONSIST(NET, TYPE, INPUTS, OUTPUTS) also checks that the
+% network has the correct number of outputs, and that the number of
+% patterns in the INPUTS and OUTPUTS is the same. The fields in NET
+% that are used are
+% type
+% nin
+% nout
+%
+% See also
+% MLPFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Assume that all is OK as default
+errstring = '';
+
+% If type string is not empty
+if ~isempty(type)
+ % First check that model has type field
+ if ~isfield(model, 'type')
+ errstring = 'Data structure does not contain type field';
+ return
+ end
+ % Check that model has the correct type
+ s = model.type;
+ if ~strcmp(s, type)
+ errstring = ['Model type ''', s, ''' does not match expected type ''',...
+ type, ''''];
+ return
+ end
+end
+
+% If inputs are present, check that they have correct dimension
+if nargin > 2
+ if ~isfield(model, 'nin')
+ errstring = 'Data structure does not contain nin field';
+ return
+ end
+
+ data_nin = size(inputs, 2);
+ if model.nin ~= data_nin
+ errstring = ['Dimension of inputs ', num2str(data_nin), ...
+ ' does not match number of model inputs ', num2str(model.nin)];
+ return
+ end
+end
+
+% If outputs are present, check that they have correct dimension
+if nargin > 3
+ if ~isfield(model, 'nout')
+ errstring = 'Data structure does not contain nout field';
+ return
+ end
+ data_nout = size(outputs, 2);
+ if model.nout ~= data_nout
+ errstring = ['Dimension of outputs ', num2str(data_nout), ...
+ ' does not match number of model outputs ', num2str(model.nout)];
+ return
+ end
+
+% Also check that number of data points in inputs and outputs is the same
+ num_in = size(inputs, 1);
+ num_out = size(outputs, 1);
+ if num_in ~= num_out
+ errstring = ['Number of input patterns ', num2str(num_in), ...
+ ' does not match number of output patterns ', num2str(num_out)];
+ return
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/convertoldnet.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/convertoldnet.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function net = convertoldnet(net)
+%CONVERTOLDNET Convert pre-2.3 release MLP and MDN nets to new format
+%
+% Description
+% NET = CONVERTOLDNET(NET) takes a network NET and, if appropriate,
+% converts it from pre-2.3 to the current format. The difference is
+% simply that in MLPs and the MLP sub-net of MDNs the field ACTFN has
+% been renamed OUTFN to make it consistent with GLM and RBF networks.
+% If the network is not old-format or an MLP or MDN it is left
+% unchanged.
+%
+% See also
+% MLP, MDN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+switch net.type
+ case 'mlp'
+ if (isfield(net, 'actfn'))
+ net.outfn = net.actfn;
+ net = rmfield(net, 'actfn');
+ end
+ case 'mdn'
+ net.mlp = convertoldnet(net.mlp);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/datread.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/datread.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,99 @@
+function [x, t, nin, nout, ndata] = datread(filename)
+%DATREAD Read data from an ascii file.
+%
+% Description
+%
+% [X, T, NIN, NOUT, NDATA] = DATREAD(FILENAME) reads from the file
+% FILENAME and returns a matrix X of input vectors, a matrix T of
+% target vectors, and integers NIN, NOUT and NDATA specifying the
+% number of inputs, the number of outputs and the number of data points
+% respectively.
+%
+% The format of the data file is as follows: the first row contains the
+% string NIN followed by the number of inputs, the second row contains
+% the string NOUT followed by the number of outputs, and the third row
+% contains the string NDATA followed by the number of data vectors.
+% Subsequent lines each contain one input vector followed by one output
+% vector, with individual values separated by spaces.
+%
+% For example, a file with 2 inputs, 1 output and 4 data points begins:
+%    nin 2
+%    nout 1
+%    ndata 4
+% followed by one line per data point, values separated by spaces.
+% See also
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+fid = fopen(filename, 'rt');
+if fid == -1
+ error('Failed to open file.')
+end
+
+% Read number of inputs
+s1 = fscanf(fid, '%s', 1);
+if ~strcmp(s1, 'nin')
+ fclose(fid);
+ error('String ''nin'' not found')
+end
+nin = fscanf(fid, '%d\n', 1);
+if ~isnumeric(nin)
+ fclose(fid);
+ error('No number for nin')
+end
+if nin < 0 | round(nin) ~= nin
+ fclose(fid);
+ error('nin must be a non-negative integer')
+end
+
+% Read number of outputs
+s2 = fscanf(fid, '%s', 1);
+if ~strcmp(s2, 'nout')
+ fclose(fid);
+ error('String ''nout'' not found')
+end
+nout = fscanf(fid, '%d\n', 1);
+if ~isnumeric(nout)
+ fclose(fid);
+ error('No number for nout')
+end
+if nout < 0 | round(nout) ~= nout
+ fclose(fid);
+ error('nout must be a non-negative integer')
+end
+
+% Read number of data values
+s3 = fscanf(fid, '%s', 1);
+if ~strcmp(s3, 'ndata')
+ fclose(fid);
+ error('String ''ndata'' not found')
+end
+ndata = fscanf(fid, '%d\n', 1);
+if ~isnumeric(ndata)
+ fclose(fid);
+ error('No number for ndata')
+end
+if ndata < 0 | round(ndata) ~= ndata
+ fclose(fid);
+ error('ndata must be a non-negative integer')
+end
+
+% The following line reads all of the remaining data to the end of file.
+temp = fscanf(fid, '%f', inf);
+
+% Check that size of temp is correct
+if size(temp, 1) * size(temp,2) ~= (nin+nout) * ndata
+ fclose(fid);
+ error('Incorrect number of elements in file')
+end
+
+temp = reshape(temp, nin + nout, ndata)';
+x = temp(:, 1:nin);
+t = temp(:, nin + 1 : nin + nout);
+
+flag = fclose(fid);
+if flag == -1
+ error('Failed to close file.')
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/datwrite.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/datwrite.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function datwrite(filename, x, t)
+%DATWRITE Write data to ascii file.
+%
+% Description
+%
+% DATWRITE(FILENAME, X, T) takes a matrix X of input vectors and a
+% matrix T of target vectors and writes them to an ascii file named
+% FILENAME. The file format is as follows: the first row contains the
+% string NIN followed by the number of inputs, the second row contains
+% the string NOUT followed by the number of outputs, and the third row
+% contains the string NDATA followed by the number of data vectors.
+% Subsequent lines each contain one input vector followed by one output
+% vector, with individual values separated by spaces.
+%
+% See also
+% DATREAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+nin = size(x, 2);
+nout = size(t, 2);
+ndata = size(x, 1);
+
+fid = fopen(filename, 'wt');
+if fid == -1
+ error('Failed to open file.')
+end
+
+if size(t, 1) ~= ndata
+ error('x and t must have same number of rows.');
+end
+
+fprintf(fid, ' nin %d\n nout %d\n ndata %d\n', nin , nout, ndata);
+for i = 1 : ndata
+ fprintf(fid, '%13e ', x(i,:), t(i,:));
+ fprintf(fid, '\n');
+end
+
+flag = fclose(fid);
+if flag == -1
+ error('Failed to close file.')
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/dem2ddat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/dem2ddat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,48 @@
+function [data, c, prior, sd] = dem2ddat(ndata)
+%DEM2DDAT Generates two dimensional data for demos.
+%
+% Description
+% The data is drawn from three spherical Gaussian distributions with
+% priors 0.3, 0.5 and 0.2; centres (2, 3.5), (0, 0) and (0,2); and
+% standard deviations 0.2, 0.5 and 1.0. DATA = DEM2DDAT(NDATA)
+% generates NDATA points.
+%
+% [DATA, C] = DEM2DDAT(NDATA) also returns a matrix containing the
+% centres of the Gaussian distributions.
+%
+% See also
+% DEMGMM1, DEMKMEAN, DEMKNN1
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+input_dim = 2;
+
+% Fix seed for reproducible results
+randn('state', 42);
+
+% Generate mixture of three Gaussians in two dimensional space
+data = randn(ndata, input_dim);
+
+% Priors for the three clusters
+prior(1) = 0.3;
+prior(2) = 0.5;
+prior(3) = 0.2;
+
+% Cluster centres
+c = [2.0, 3.5; 0.0, 0.0; 0.0, 2.0];
+
+% Cluster standard deviations
+sd = [0.2 0.5 1.0];
+
+% Put first cluster at (2, 3.5)
+data(1:prior(1)*ndata, 1) = data(1:prior(1)*ndata, 1) * 0.2 + c(1,1);
+data(1:prior(1)*ndata, 2) = data(1:prior(1)*ndata, 2) * 0.2 + c(1,2);
+
+% Leave second cluster at (0,0)
+data((prior(1)*ndata + 1):(prior(2)+prior(1))*ndata, :) = ...
+ data((prior(1)*ndata + 1):(prior(2)+prior(1))*ndata, :) * 0.5;
+
+% Put third cluster at (0,2)
+data((prior(1)+prior(2))*ndata +1:ndata, 2) = ...
+ data((prior(1)+prior(2))*ndata+1:ndata, 2) + c(3, 2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demard.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demard.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,157 @@
+%DEMARD Automatic relevance determination using the MLP.
+%
+% Description
+% This script demonstrates the technique of automatic relevance
+% determination (ARD) using a synthetic problem having three input
+% variables: X1 is sampled uniformly from the range (0,1) and has a low
+% level of added Gaussian noise, X2 is a copy of X1 with a higher level
+% of added noise, and X3 is sampled randomly from a Gaussian
+% distribution. The single target variable is determined by
+% SIN(2*PI*X1) with additive Gaussian noise. Thus X1 is very relevant
+% for determining the target value, X2 is of some relevance, while X3
+% is irrelevant. The prior over weights is given by the ARD Gaussian
+% prior with a separate hyper-parameter for the group of weights
+% associated with each input. A multi-layer perceptron is trained on
+% this data, with re-estimation of the hyper-parameters using EVIDENCE.
+% The final values for the hyper-parameters reflect the relative
+% importance of the three inputs.
+%
+% See also
+% DEMMLP1, DEMEV1, MLP, EVIDENCE
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc;
+disp('This demonstration illustrates the technique of automatic relevance')
+disp('determination (ARD) using a multi-layer perceptron.')
+disp(' ');
+disp('First, we set up a synthetic data set involving three input variables:')
+disp('x1 is sampled uniformly from the range (0,1) and has a low level of')
+disp('added Gaussian noise, x2 is a copy of x1 with a higher level of added')
+disp('noise, and x3 is sampled randomly from a Gaussian distribution. The')
+disp('single target variable is given by t = sin(2*pi*x1) with additive')
+disp('Gaussian noise. Thus x1 is very relevant for determining the target')
+disp('value, x2 is of some relevance, while x3 should in principle be')
+disp('irrelevant.')
+disp(' ');
+disp('Press any key to see a plot of t against x1.')
+pause;
+
+% Generate the data set.
+randn('state', 0);
+rand('state', 0);
+ndata = 100;
+noise = 0.05;
+x1 = rand(ndata, 1) + 0.002*randn(ndata, 1);
+x2 = x1 + 0.02*randn(ndata, 1);
+x3 = 0.5 + 0.2*randn(ndata, 1);
+x = [x1, x2, x3];
+t = sin(2*pi*x1) + noise*randn(ndata, 1);
+
+% Plot the data and the original function.
+h = figure;
+plotvals = linspace(0, 1, 200)';
+plot(x1, t, 'ob')
+hold on
+axis([0 1 -1.5 1.5])
+[fx, fy] = fplot('sin(2*pi*x)', [0 1]);
+plot(fx, fy, '-g', 'LineWidth', 2);
+legend('data', 'function');
+
+disp(' ');
+disp('Press any key to continue')
+pause; clc;
+
+disp('The prior over weights is given by the ARD Gaussian prior with a')
+disp('separate hyper-parameter for the group of weights associated with each')
+disp('input. This prior is set up using the utility MLPPRIOR. The network is')
+disp('trained by error minimization using scaled conjugate gradient function')
+disp('SCG. There are two cycles of training, and at the end of each cycle')
+disp('the hyper-parameters are re-estimated using EVIDENCE.')
+disp(' ');
+disp('Press any key to create and train the network.')
+disp(' ');
+pause;
+
+% Set up network parameters.
+nin = 3; % Number of inputs.
+nhidden = 2; % Number of hidden units.
+nout = 1; % Number of outputs.
+aw1 = 0.01*ones(1, nin); % First-layer ARD hyperparameters.
+ab1 = 0.01; % Hyperparameter for hidden unit biases.
+aw2 = 0.01; % Hyperparameter for second-layer weights.
+ab2 = 0.01; % Hyperparameter for output unit biases.
+beta = 50.0; % Coefficient of data error.
+
+% Create and initialize network.
+prior = mlpprior(nin, nhidden, nout, aw1, ab1, aw2, ab2);
+net = mlp(nin, nhidden, nout, 'linear', prior, beta);
+
+% Set up vector of options for the optimiser.
+nouter = 2; % Number of outer loops
+ninner = 10; % Number of inner loops
+options = zeros(1,18); % Default options vector.
+options(1) = 1; % This provides display of error values.
+options(2) = 1.0e-7; % This ensures that convergence must occur
+options(3) = 1.0e-7;
+options(14) = 300; % Number of training cycles in inner loop.
+
+% Train using scaled conjugate gradients, re-estimating alpha and beta.
+for k = 1:nouter
+ net = netopt(net, options, x, t, 'scg');
+ [net, gamma] = evidence(net, x, t, ninner);
+ fprintf(1, '\n\nRe-estimation cycle %d:\n', k);
+ disp('The first three alphas are the hyperparameters for the corresponding');
+ disp('input to hidden unit weights. The remainder are the hyperparameters');
+ disp('for the hidden unit biases, second layer weights and output unit')
+ disp('biases, respectively.')
+ fprintf(1, ' alpha = %8.5f\n', net.alpha);
+ fprintf(1, ' beta = %8.5f\n', net.beta);
+ fprintf(1, ' gamma = %8.5f\n\n', gamma);
+ disp(' ')
+ disp('Press any key to continue.')
+ pause
+end
+
+% Plot the function corresponding to the trained network.
+figure(h); hold on;
+[y, z] = mlpfwd(net, plotvals*ones(1,3));
+plot(plotvals, y, '-r', 'LineWidth', 2)
+legend('data', 'function', 'network');
+
+disp('Press any key to continue.');
+pause; clc;
+
+disp('We can now read off the hyperparameter values corresponding to the')
+disp('three inputs x1, x2 and x3:')
+disp(' ');
+fprintf(1, ' alpha1: %8.5f\n', net.alpha(1));
+fprintf(1, ' alpha2: %8.5f\n', net.alpha(2));
+fprintf(1, ' alpha3: %8.5f\n', net.alpha(3));
+disp(' ');
+disp('Since each alpha corresponds to an inverse variance, we see that the')
+disp('posterior variance for weights associated with input x1 is large, that')
+disp('of x2 has an intermediate value and the variance of weights associated')
+disp('with x3 is small.')
+disp(' ')
+disp('Press any key to continue.')
+disp(' ')
+pause
+disp('This is confirmed by looking at the corresponding weight values:')
+disp(' ');
+fprintf(1, ' %8.5f %8.5f\n', net.w1');
+disp(' ');
+disp('where the three rows correspond to weights asssociated with x1, x2 and')
+disp('x3 respectively. We see that the network is giving greatest emphasis')
+disp('to x1 and least emphasis to x3, with intermediate emphasis on')
+disp('x2. Since the target t is statistically independent of x3 we might')
+disp('expect the weights associated with this input would go to')
+disp('zero. However, for any finite data set there may be some chance')
+disp('correlation between x3 and t, and so the corresponding alpha remains')
+disp('finite.')
+
+disp(' ');
+disp('Press any key to end.')
+pause; clc; close(h); clear all
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demev1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demev1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,146 @@
+%DEMEV1 Demonstrate Bayesian regression for the MLP.
+%
+% Description
+% The problem consists of an input variable X which is sampled from a
+% Gaussian distribution, and a target variable T generated by computing
+% SIN(2*PI*X) and adding Gaussian noise. A 2-layer network with linear
+% outputs is trained by minimizing a sum-of-squares error function with
+% isotropic Gaussian regularizer, using the scaled conjugate gradient
+% optimizer. The hyperparameters ALPHA and BETA are re-estimated using
+% the function EVIDENCE. A graph is plotted of the original function,
+% the training data, the trained network function, and the error bars.
+%
+% See also
+% EVIDENCE, MLP, SCG, DEMARD, DEMMLP1
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc;
+disp('This demonstration illustrates the application of Bayesian')
+disp('re-estimation to determine the hyperparameters in a simple regression')
+disp('problem. It is based on a local quadratic approximation to a mode of')
+disp('the posterior distribution and the evidence maximization framework of')
+disp('MacKay.')
+disp(' ')
+disp('First, we generate a synthetic data set consisting of a single input')
+disp('variable x sampled from a Gaussian distribution, and a target variable')
+disp('t obtained by evaluating sin(2*pi*x) and adding Gaussian noise.')
+disp(' ')
+disp('Press any key to see a plot of the data together with the sine function.')
+pause;
+
+% Generate the matrix of inputs x and targets t.
+
+ndata = 16; % Number of data points.
+noise = 0.1; % Standard deviation of noise distribution.
+randn('state', 0);
+x = 0.25 + 0.07*randn(ndata, 1);
+t = sin(2*pi*x) + noise*randn(size(x));
+
+% Plot the data and the original sine function.
+h = figure;
+nplot = 200;
+plotvals = linspace(0, 1, nplot)';
+plot(x, t, 'ok')
+xlabel('Input')
+ylabel('Target')
+hold on
+axis([0 1 -1.5 1.5])
+fplot('sin(2*pi*x)', [0 1], '-g')
+legend('data', 'function');
+
+disp(' ')
+disp('Press any key to continue')
+pause; clc;
+
+disp('Next we create a two-layer MLP network having 3 hidden units and one')
+disp('linear output. The model assumes Gaussian target noise governed by an')
+disp('inverse variance hyperparmeter beta, and uses a simple Gaussian prior')
+disp('distribution governed by an inverse variance hyperparameter alpha.')
+disp(' ');
+disp('The network weights and the hyperparameters are initialised and then')
+disp('the weights are optimized with the scaled conjugate gradient')
+disp('algorithm using the SCG function, with the hyperparameters kept')
+disp('fixed. After a maximum of 500 iterations, the hyperparameters are')
+disp('re-estimated using the EVIDENCE function. The process of optimizing')
+disp('the weights with fixed hyperparameters and then re-estimating the')
+disp('hyperparameters is repeated for a total of 3 cycles.')
+disp(' ')
+disp('Press any key to train the network and determine the hyperparameters.')
+pause;
+
+% Set up network parameters.
+nin = 1; % Number of inputs.
+nhidden = 3; % Number of hidden units.
+nout = 1; % Number of outputs.
+alpha = 0.01; % Initial prior hyperparameter.
+beta_init = 50.0; % Initial noise hyperparameter.
+
+% Create and initialize network weight vector.
+net = mlp(nin, nhidden, nout, 'linear', alpha, beta_init);
+
+% Set up vector of options for the optimiser.
+nouter = 3; % Number of outer loops.
+ninner = 1;			% Number of inner loops.
+options = zeros(1,18); % Default options vector.
+options(1) = 1; % This provides display of error values.
+options(2) = 1.0e-7; % Absolute precision for weights.
+options(3) = 1.0e-7; % Precision for objective function.
+options(14) = 500; % Number of training cycles in inner loop.
+
+% Train using scaled conjugate gradients, re-estimating alpha and beta.
+for k = 1:nouter
+ net = netopt(net, options, x, t, 'scg');
+ [net, gamma] = evidence(net, x, t, ninner);
+ fprintf(1, '\nRe-estimation cycle %d:\n', k);
+ fprintf(1, ' alpha = %8.5f\n', net.alpha);
+ fprintf(1, ' beta = %8.5f\n', net.beta);
+ fprintf(1, ' gamma = %8.5f\n\n', gamma);
+ disp(' ')
+ disp('Press any key to continue.')
+ pause;
+end
+
+fprintf(1, 'true beta: %f\n', 1/(noise*noise));
+
+disp(' ')
+disp('Network training and hyperparameter re-estimation are now complete.')
+disp('Compare the final value for the hyperparameter beta with the true')
+disp('value.')
+disp(' ')
+disp('Notice that the final error value is close to the number of data')
+disp(['points (', num2str(ndata),') divided by two.'])
+disp(' ')
+disp('Press any key to continue.')
+pause; clc;
+disp('We can now plot the function represented by the trained network. This')
+disp('corresponds to the mean of the predictive distribution. We can also')
+disp('plot ''error bars'' representing one standard deviation of the')
+disp('predictive distribution around the mean.')
+disp(' ')
+disp('Press any key to add the network function and error bars to the plot.')
+pause;
+
+% Evaluate error bars.
+[y, sig2] = netevfwd(mlppak(net), net, x, t, plotvals);
+sig = sqrt(sig2);
+
+% Plot the data, the original function, and the trained network function.
+[y, z] = mlpfwd(net, plotvals);
+figure(h); hold on;
+plot(plotvals, y, '-r')
+xlabel('Input')
+ylabel('Target')
+plot(plotvals, y + sig, '-b');
+plot(plotvals, y - sig, '-b');
+legend('data', 'function', 'network', 'error bars');
+
+disp(' ')
+disp('Notice how the confidence interval spanned by the ''error bars'' is')
+disp('smaller in the region of input space where the data density is high,')
+disp('and becomes larger in regions away from the data.')
+disp(' ')
+disp('Press any key to end.')
+pause; clc; close(h);
+%clear all
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demev2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demev2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,248 @@
+%DEMEV2 Demonstrate Bayesian classification for the MLP.
+%
+% Description
+% A synthetic two class two-dimensional dataset X is sampled from a
+% mixture of four Gaussians. Each class is associated with two of the
+% Gaussians so that the optimal decision boundary is non-linear. A 2-
+% layer network with logistic outputs is trained by minimizing the
+% cross-entropy error function with isotropic Gaussian regularizer (one
+% hyperparameter for each of the four standard weight groups), using
+% the scaled conjugate gradient optimizer. The hyperparameter vectors
+% ALPHA and BETA are re-estimated using the function EVIDENCE. A graph
+% is plotted of the optimal, regularised, and unregularised decision
+% boundaries. A further plot of the moderated versus unmoderated
+% contours is generated.
+%
+% See also
+% EVIDENCE, MLP, SCG, DEMARD, DEMMLP2
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+clc;
+
+disp('This program demonstrates the use of the evidence procedure on')
+disp('a two-class problem. It also shows the improved generalisation')
+disp('performance that can be achieved with moderated outputs; that is')
+disp('predictions where an approximate integration over the true')
+disp('posterior distribution is carried out.')
+disp(' ')
+disp('First we generate a synthetic dataset with two-dimensional input')
+disp('sampled from a mixture of four Gaussians. Each class is')
+disp('associated with two of the Gaussians so that the optimal decision')
+disp('boundary is non-linear.')
+disp(' ')
+disp('Press any key to see a plot of the data.')
+pause;
+
+% Generate the matrix of inputs x and targets t.
+
+rand('state', 423);
+randn('state', 423);
+
+ClassSymbol1 = 'r.';
+ClassSymbol2 = 'y.';
+PointSize = 12;
+titleSize = 10;
+
+fh1 = figure;
+set(fh1, 'Name', 'True Data Distribution');
+whitebg(fh1, 'k');
+
+%
+% Generate the data
+%
+n=200;
+
+% Set up mixture model: 2d data with four centres
+% Class 1 is first two centres, class 2 from the other two
+mix = gmm(2, 4, 'full');
+mix.priors = [0.25 0.25 0.25 0.25];
+mix.centres = [0 -0.1; 1.5 0; 1 1; 1 -1];
+mix.covars(:,:,1) = [0.625 -0.2165; -0.2165 0.875];
+mix.covars(:,:,2) = [0.25 0; 0 0.25];
+mix.covars(:,:,3) = [0.2241 -0.1368; -0.1368 0.9759];
+mix.covars(:,:,4) = [0.2375 0.1516; 0.1516 0.4125];
+
+[data, label] = gmmsamp(mix, n);
+
+%
+% Calculate some useful axis limits
+%
+x0 = min(data(:,1));
+x1 = max(data(:,1));
+y0 = min(data(:,2));
+y1 = max(data(:,2));
+dx = x1-x0;
+dy = y1-y0;
+expand = 5/100; % Add on 5 percent each way
+x0 = x0 - dx*expand;
+x1 = x1 + dx*expand;
+y0 = y0 - dy*expand;
+y1 = y1 + dy*expand;
+resolution = 100;
+step = dx/resolution;
+xrange = [x0:step:x1];
+yrange = [y0:step:y1];
+%
+% Generate the grid
+%
+[X Y]=meshgrid([x0:step:x1],[y0:step:y1]);
+%
+% Calculate the class conditional densities, the unconditional densities and
+% the posterior probabilities
+%
+px_j = gmmactiv(mix, [X(:) Y(:)]);
+px = reshape(px_j*(mix.priors)',size(X));
+post = gmmpost(mix, [X(:) Y(:)]);
+p1_x = reshape(post(:, 1) + post(:, 2), size(X));
+p2_x = reshape(post(:, 3) + post(:, 4), size(X));
+
+plot(data((label<=2),1),data(label<=2,2),ClassSymbol1, 'MarkerSize', ...
+PointSize)
+hold on
+axis([x0 x1 y0 y1])
+plot(data((label>2),1),data(label>2,2),ClassSymbol2, 'MarkerSize', ...
+ PointSize)
+
+% Convert targets to 0-1 encoding
+target=[label<=2];
+disp(' ')
+disp('Press any key to continue')
+pause; clc;
+
+disp('Next we create a two-layer MLP network with 6 hidden units and')
+disp('one logistic output. We use a separate inverse variance')
+disp('hyperparameter for each group of weights (inputs, input bias,')
+disp('outputs, output bias) and the weights are optimised with the')
+disp('scaled conjugate gradient algorithm. After each 100 iterations')
+disp('the hyperparameters are re-estimated twice. There are eight')
+disp('cycles of the whole algorithm.')
+disp(' ')
+disp('Press any key to train the network and determine the hyperparameters.')
+pause;
+
+% Set up network parameters.
+nin = 2; % Number of inputs.
+nhidden = 6; % Number of hidden units.
+nout = 1; % Number of outputs.
+alpha = 0.01; % Initial prior hyperparameter.
+aw1 = 0.01;
+ab1 = 0.01;
+aw2 = 0.01;
+ab2 = 0.01;
+
+% Create and initialize network weight vector.
+prior = mlpprior(nin, nhidden, nout, aw1, ab1, aw2, ab2);
+net = mlp(nin, nhidden, nout, 'logistic', prior);
+
+% Set up vector of options for the optimiser.
+nouter = 8; % Number of outer loops.
+ninner = 2;			% Number of inner loops.
+options = foptions; % Default options vector.
+options(1) = 1; % This provides display of error values.
+options(2) = 1.0e-5; % Absolute precision for weights.
+options(3) = 1.0e-5; % Precision for objective function.
+options(14) = 100; % Number of training cycles in inner loop.
+
+% Train using scaled conjugate gradients, re-estimating alpha and beta.
+for k = 1:nouter
+ net = netopt(net, options, data, target, 'scg');
+ [net, gamma] = evidence(net, data, target, ninner);
+ fprintf(1, '\nRe-estimation cycle %d:\n', k);
+ disp([' alpha = ', num2str(net.alpha')]);
+ fprintf(1, ' gamma = %8.5f\n\n', gamma);
+ disp(' ')
+ disp('Press any key to continue.')
+ pause;
+end
+
+disp(' ')
+disp('Network training and hyperparameter re-estimation are now complete.')
+disp('Notice that the final error value is close to the number of data')
+disp(['points (', num2str(n), ') divided by two.'])
+disp('Also, the hyperparameter values differ, which suggests that a single')
+disp('hyperparameter would not be so effective.')
+disp(' ')
+disp('First we train an MLP without Bayesian regularisation on the')
+disp('same dataset using 400 iterations of scaled conjugate gradient')
+disp(' ')
+disp('Press any key to train the network by maximum likelihood.')
+pause;
+% Train standard network
+net2 = mlp(nin, nhidden, nout, 'logistic');
+options(14) = 400;
+net2 = netopt(net2, options, data, target, 'scg');
+y2g = mlpfwd(net2, [X(:), Y(:)]);
+y2g = reshape(y2g(:, 1), size(X));
+
+disp(' ')
+disp('We can now plot the function represented by the trained networks.')
+disp('We show the decision boundaries (output = 0.5) and the optimal')
+disp('decision boundary given by applying Bayes'' theorem to the true')
+disp('data model.')
+disp(' ')
+disp('Press any key to add the boundaries to the plot.')
+pause;
+
+% Evaluate predictions.
+[yg, ymodg] = mlpevfwd(net, data, target, [X(:) Y(:)]);
+yg = reshape(yg(:,1),size(X));
+ymodg = reshape(ymodg(:,1),size(X));
+
+% Bayesian decision boundary
+[cB, hB] = contour(xrange,yrange,p1_x,[0.5 0.5],'b-');
+[cNb, hNb] = contour(xrange,yrange,yg,[0.5 0.5],'r-');
+[cN, hN] = contour(xrange,yrange,y2g,[0.5 0.5],'g-');
+set(hB, 'LineWidth', 2);
+set(hNb, 'LineWidth', 2);
+set(hN, 'LineWidth', 2);
+Chandles = [hB(1) hNb(1) hN(1)];
+legend(Chandles, 'Bayes', ...
+ 'Reg. Network', 'Network', 3);
+
+disp(' ')
+disp('Note how the regularised network predictions are closer to the')
+disp('optimal decision boundary, while the unregularised network is')
+disp('overtrained.')
+
+disp(' ')
+disp('We will now compare moderated and unmoderated outputs for the');
+disp('regularised network by showing the contour plot of the posterior')
+disp('probability estimates.')
+disp(' ')
+disp('The first plot shows the regularised (moderated) predictions')
+disp('and the second shows the standard predictions from the same network.')
+disp('These agree at the level 0.5.')
+disp('Press any key to continue')
+pause
+levels = 0:0.1:1;
+fh4 = figure;
+set(fh4, 'Name', 'Moderated outputs');
+hold on
+plot(data((label<=2),1),data(label<=2,2),'r.', 'MarkerSize', PointSize)
+plot(data((label>2),1),data(label>2,2),'y.', 'MarkerSize', PointSize)
+
+[cNby, hNby] = contour(xrange, yrange, ymodg, levels, 'k-');
+set(hNby, 'LineWidth', 1);
+
+fh5 = figure;
+set(fh5, 'Name', 'Unmoderated outputs');
+hold on
+plot(data((label<=2),1),data(label<=2,2),'r.', 'MarkerSize', PointSize)
+plot(data((label>2),1),data(label>2,2),'y.', 'MarkerSize', PointSize)
+
+[cNbm, hNbm] = contour(xrange, yrange, yg, levels, 'k-');
+set(hNbm, 'LineWidth', 1);
+
+disp(' ')
+disp('Note how the moderated contours are more widely spaced. This shows')
+disp('that there is a larger region where the outputs are close to 0.5')
+disp('and a smaller region where the outputs are close to 0 or 1.')
+disp(' ')
+disp('Press any key to exit')
+pause
+close(fh1);
+close(fh4);
+close(fh5);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demev3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demev3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,154 @@
+%DEMEV3 Demonstrate Bayesian regression for the RBF.
+%
+% Description
+% The problem consists of an input variable X which is sampled from a
+% Gaussian distribution, and a target variable T generated by computing
+% SIN(2*PI*X) and adding Gaussian noise. An RBF network with linear
+% outputs is trained by minimizing a sum-of-squares error function with
+% isotropic Gaussian regularizer, using the scaled conjugate gradient
+% optimizer. The hyperparameters ALPHA and BETA are re-estimated using
+% the function EVIDENCE. A graph is plotted of the original function,
+% the training data, the trained network function, and the error bars.
+%
+% See also
+% DEMEV1, EVIDENCE, RBF, SCG, NETEVFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc;
+disp('This demonstration illustrates the application of Bayesian')
+disp('re-estimation to determine the hyperparameters in a simple regression')
+disp('problem using an RBF netowk. It is based on a the fact that the')
+disp('posterior distribution for the output weights of an RBF is Gaussian')
+disp('and uses the evidence maximization framework of MacKay.')
+disp(' ')
+disp('First, we generate a synthetic data set consisting of a single input')
+disp('variable x sampled from a Gaussian distribution, and a target variable')
+disp('t obtained by evaluating sin(2*pi*x) and adding Gaussian noise.')
+disp(' ')
+disp('Press any key to see a plot of the data together with the sine function.')
+pause;
+
+% Generate the matrix of inputs x and targets t.
+
+ndata = 16; % Number of data points.
+noise = 0.1; % Standard deviation of noise distribution.
+randn('state', 0);
+rand('state', 0);
+x = 0.25 + 0.07*randn(ndata, 1);
+t = sin(2*pi*x) + noise*randn(size(x));
+
+% Plot the data and the original sine function.
+h = figure;
+nplot = 200;
+plotvals = linspace(0, 1, nplot)';
+plot(x, t, 'ok')
+xlabel('Input')
+ylabel('Target')
+hold on
+axis([0 1 -1.5 1.5])
+fplot('sin(2*pi*x)', [0 1], '-g')
+legend('data', 'function');
+
+disp(' ')
+disp('Press any key to continue')
+pause; clc;
+
+disp('Next we create a two-layer MLP network having 3 hidden units and one')
+disp('linear output. The model assumes Gaussian target noise governed by an')
+disp('inverse variance hyperparmeter beta, and uses a simple Gaussian prior')
+disp('distribution governed by an inverse variance hyperparameter alpha.')
+disp(' ');
+disp('The network weights and the hyperparameters are initialised and then')
+disp('the output layer weights are optimized with the scaled conjugate gradient')
+disp('algorithm using the SCG function, with the hyperparameters kept')
+disp('fixed. After a maximum of 50 iterations, the hyperparameters are')
+disp('re-estimated using the EVIDENCE function. The process of optimizing')
+disp('the weights with fixed hyperparameters and then re-estimating the')
+disp('hyperparameters is repeated for a total of 3 cycles.')
+disp(' ')
+disp('Press any key to train the network and determine the hyperparameters.')
+pause;
+
+% Set up network parameters.
+nin = 1; % Number of inputs.
+nhidden = 3; % Number of hidden units.
+nout = 1; % Number of outputs.
+alpha = 0.01; % Initial prior hyperparameter.
+beta_init = 50.0; % Initial noise hyperparameter.
+
+% Create and initialize network weight vector.
+net = rbf(nin, nhidden, nout, 'tps', 'linear', alpha, beta_init);
+[net.mask, prior] = rbfprior('tps', nin, nhidden, nout, alpha, alpha);
+net = netinit(net, prior);
+
+options = foptions;
+options(14) = 5; % At most 5 EM iterations for basis functions
+options(1) = -1; % Turn off all messages
+net = rbfsetbf(net, options, x); % Initialise the basis functions
+
+% Now train the network
+nouter = 5;
+ninner = 2;
+options = foptions;
+options(1) = 1;
+options(2) = 1.0e-5; % Absolute precision for weights.
+options(3) = 1.0e-5; % Precision for objective function.
+options(14) = 50; % Number of training cycles in inner loop.
+
+% Train using scaled conjugate gradients, re-estimating alpha and beta.
+for k = 1:nouter
+ net = netopt(net, options, x, t, 'scg');
+ [net, gamma] = evidence(net, x, t, ninner);
+ fprintf(1, '\nRe-estimation cycle %d:\n', k);
+ fprintf(1, ' alpha = %8.5f\n', net.alpha);
+ fprintf(1, ' beta = %8.5f\n', net.beta);
+ fprintf(1, ' gamma = %8.5f\n\n', gamma);
+ disp(' ')
+ disp('Press any key to continue.')
+ pause;
+end
+
+fprintf(1, 'true beta: %f\n', 1/(noise*noise));
+
+disp(' ')
+disp('Network training and hyperparameter re-estimation are now complete.')
+disp('Compare the final value for the hyperparameter beta with the true')
+disp('value.')
+disp(' ')
+disp('Notice that the final error value is close to the number of data')
+disp(['points (', num2str(ndata),') divided by two.'])
+disp(' ')
+disp('Press any key to continue.')
+pause; clc;
+disp('We can now plot the function represented by the trained network. This')
+disp('corresponds to the mean of the predictive distribution. We can also')
+disp('plot ''error bars'' representing one standard deviation of the')
+disp('predictive distribution around the mean.')
+disp(' ')
+disp('Press any key to add the network function and error bars to the plot.')
+pause;
+
+% Evaluate error bars.
+[y, sig2] = netevfwd(netpak(net), net, x, t, plotvals);
+sig = sqrt(sig2);
+
+% Plot the data, the original function, and the trained network function.
+[y, z] = rbffwd(net, plotvals);
+figure(h); hold on;
+plot(plotvals, y, '-r')
+xlabel('Input')
+ylabel('Target')
+plot(plotvals, y + sig, '-b');
+plot(plotvals, y - sig, '-b');
+legend('data', 'function', 'network', 'error bars');
+
+disp(' ')
+disp('Notice how the confidence interval spanned by the ''error bars'' is')
+disp('smaller in the region of input space where the data density is high,')
+disp('and becomes larger in regions away from the data.')
+disp(' ')
+disp('Press any key to end.')
+pause; clc; close(h);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgauss.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgauss.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,80 @@
+%DEMGAUSS Demonstrate sampling from Gaussian distributions.
+%
+% Description
+%
+% DEMGAUSS provides a simple illustration of the generation of data
+% from Gaussian distributions. It first samples from a one-dimensional
+% distribution using RANDN, and then plots a normalized histogram
+% estimate of the distribution using HISTP together with the true
+% density calculated using GAUSS.
+%
+% DEMGAUSS then demonstrates sampling from a Gaussian distribution in
+% two dimensions. It creates a mean vector and a covariance matrix, and
+% then plots contours of constant density using the function GAUSS. A
+% sample of points drawn from this distribution, obtained using the
+% function GSAMP, is then superimposed on the contours.
+%
+% See also
+% GAUSS, GSAMP, HISTP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc
+mean = 2; var = 5; nsamp = 3000;
+xmin = -10; xmax = 10; nbins = 30;
+disp('Demonstration of sampling from a uni-variate Gaussian with mean')
+dstring = [num2str(mean), ' and variance ', num2str(var), '. ', ...
+ num2str(nsamp), ' samples are taken.'];
+disp(dstring);
+x = mean + sqrt(var)*randn(nsamp, 1);
+fh1 = figure;
+histp(x, xmin, xmax, nbins);
+hold on;
+axis([xmin xmax 0 0.2]);
+plotvals = linspace(xmin, xmax, 200)';
+probs = gauss(mean, var, plotvals);
+plot(plotvals, probs, '-r');
+xlabel('X')
+ylabel('Density')
+
+disp(' ')
+disp('Press any key to continue')
+pause;
+mu = [3 2];
+lam1 = 0.5;
+lam2 = 5.0;
+Sigma = lam1*[1,1]'*[1,1] + lam2*[1,-1]'*[1,-1];
+disp(' ')
+disp('Demonstration of sampling from a bi-variate Gaussian. The mean is')
+dstring = ['[', num2str(mu(1)), ', ', num2str(mu(2)), ...
+ '] and the covariance matrix is'];
+disp(dstring)
+disp(Sigma);
+ngrid = 40;
+cmin = -5; cmax = 10;
+cvals = linspace(cmin, cmax, ngrid);
+[X1, X2] = meshgrid(cvals, cvals);
+XX = [X1(:), X2(:)];
+probs = gauss(mu, Sigma, XX);
+probs = reshape(probs, ngrid, ngrid);
+
+fh2 = figure;
+contour(X1, X2, probs, 'b');
+hold on
+
+nsamp = 300;
+dstring = [num2str(nsamp), ' samples are generated.'];
+disp('The plot shows the sampled data points with a contour plot of their density.')
+samples = gsamp(mu, Sigma, nsamp);
+plot(samples(:,1), samples(:,2), 'or');
+xlabel('X1')
+ylabel('X2')
+grid off;
+
+disp(' ')
+disp('Press any key to end')
+pause;
+close(fh1);
+close(fh2);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demglm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demglm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,95 @@
+%DEMGLM1 Demonstrate simple classification using a generalized linear model.
+%
+% Description
+% The problem consists of a two dimensional input matrix DATA and a
+% vector of classifications T. The data is generated from two
+% Gaussian clusters, and a generalized linear model with logistic
+% output is trained using iterative reweighted least squares. A plot of
+% the data together with the 0.1, 0.5 and 0.9 contour lines of the
+% conditional probability is generated.
+%
+% See also
+% DEMGLM2, GLM, GLMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate data from two classes in 2d
+input_dim = 2;
+
+% Fix seeds for reproducible results
+randn('state', 42);
+rand('state', 42);
+
+ndata = 100;
+% Generate mixture of two Gaussians in two dimensional space
+mix = gmm(2, 2, 'spherical');
+mix.priors = [0.4 0.6]; % Cluster priors
+mix.centres = [2.0, 2.0; 0.0, 0.0]; % Cluster centres
+mix.covars = [0.5, 1.0];
+
+[data, label] = gmmsamp(mix, ndata);
+targets = label - ones(ndata, 1);
+
+% Plot the result
+
+clc
+disp('This demonstration illustrates the use of a generalized linear model')
+disp('to classify data from two classes in a two-dimensional space. We')
+disp('begin by generating and plotting the data.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh1 = figure;
+plot(data(label==1,1), data(label==1,2), 'bo');
+hold on
+axis([-4 5 -4 5])
+set(gca, 'box', 'on')
+plot(data(label==2,1), data(label==2,2), 'rx')
+title('Data')
+
+clc
+disp('Now we fit a model consisting of a logistic sigmoid function of')
+disp('a linear combination of the input variables.')
+disp(' ')
+disp('The model is trained using the IRLS algorithm for 5 iterations')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+net = glm(input_dim, 1, 'logistic');
+options = foptions;
+options(1) = 1;
+options(14) = 5;
+net = glmtrain(net, options, data, targets);
+
+disp(' ')
+disp('We now plot some density contours given by this model.')
+disp('The contour labelled 0.5 is the decision boundary.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+x = -4.0:0.2:5.0;
+y = -4.0:0.2:5.0;
+[X, Y] = meshgrid(x,y);
+X = X(:);
+Y = Y(:);
+grid = [X Y];
+Z = glmfwd(net, grid);
+Z = reshape(Z, length(x), length(y));
+v = [0.1 0.5 0.9];
+[c, h] = contour(x, y, Z, v);
+title('Generalized Linear Model')
+set(h, 'linewidth', 3)
+clabel(c, h);
+
+clc
+disp('Note that the contours of constant density are straight lines.')
+disp(' ')
+disp('Press any key to end.')
+pause
+close(fh1);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demglm2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demglm2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,103 @@
+%DEMGLM2 Demonstrate simple classification using a generalized linear model.
+%
+% Description
+% The problem consists of a two dimensional input matrix DATA and a
+% vector of classifications T. The data is generated from three
+% Gaussian clusters, and a generalized linear model with softmax output
+% is trained using iterative reweighted least squares. A plot of the
+% data together with regions shaded by the classification given by the
+% network is generated.
+%
+% See also
+% DEMGLM1, GLM, GLMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate data from three classes in 2d
+input_dim = 2;
+
+% Fix seeds for reproducible results
+randn('state', 42);
+rand('state', 42);
+
+ndata = 100;
+% Generate mixture of three Gaussians in two dimensional space
+mix = gmm(2, 3, 'spherical');
+mix.priors = [0.4 0.3 0.3]; % Cluster priors
+mix.centres = [2, 2; 0.0, 0.0; 1, -1]; % Cluster centres
+mix.covars = [0.5 1.0 0.6];
+
+[data, label] = gmmsamp(mix, ndata);
+id = eye(3);
+targets = id(label,:);
+
+% Plot the result
+
+clc
+disp('This demonstration illustrates the use of a generalized linear model')
+disp('to classify data from three classes in a two-dimensional space. We')
+disp('begin by generating and plotting the data.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh1 = figure;
+plot(data(label==1,1), data(label==1,2), 'bo');
+hold on
+axis([-4 5 -4 5]);
+set(gca, 'Box', 'on')
+plot(data(label==2,1), data(label==2,2), 'rx')
+plot(data(label==3, 1), data(label==3, 2), 'go')
+title('Data')
+
+clc
+disp('Now we fit a model consisting of a softmax function of')
+disp('a linear combination of the input variables.')
+disp(' ')
+disp('The model is trained using the IRLS algorithm for up to 10 iterations')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+net = glm(input_dim, size(targets, 2), 'softmax');
+options = foptions;
+options(1) = 1;
+options(14) = 10;
+net = glmtrain(net, options, data, targets);
+
+disp(' ')
+disp('We now plot the decision regions given by this model.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+x = -4.0:0.2:5.0;
+y = -4.0:0.2:5.0;
+[X, Y] = meshgrid(x,y);
+X = X(:);
+Y = Y(:);
+grid = [X Y];
+Z = glmfwd(net, grid);
+[foo , class] = max(Z');
+class = class';
+colors = ['b.'; 'r.'; 'g.'];
+for i = 1:3
+ thisX = X(class == i);
+ thisY = Y(class == i);
+ h = plot(thisX, thisY, colors(i,:));
+ set(h, 'MarkerSize', 8);
+end
+title('Plot of Decision regions')
+
+hold off
+
+clc
+disp('Note that the boundaries of decision regions are straight lines.')
+disp(' ')
+disp('Press any key to end.')
+pause
+close(fh1);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgmm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgmm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,173 @@
+%DEMGMM1 Demonstrate EM for Gaussian mixtures.
+%
+% Description
+% This script demonstrates the use of the EM algorithm to fit a mixture
+% of Gaussians to a set of data using maximum likelihood. A colour
+% coding scheme is used to illustrate the evaluation of the posterior
+% probabilities in the E-step of the EM algorithm.
+%
+% See also
+% DEMGMM2, DEMGMM3, DEMGMM4, GMM, GMMEM, GMMPOST
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc;
+disp('This demonstration illustrates the use of the EM (expectation-')
+disp('maximization) algorithm for fitting of a mixture of Gaussians to a')
+disp('data set by maximum likelihood.')
+disp(' ')
+disp('The data set consists of 40 data points in a 2-dimensional')
+disp('space, generated by sampling from a mixture of 2 Gaussian')
+disp('distributions.')
+disp(' ')
+disp('Press any key to see a plot of the data.')
+pause;
+
+% Generate the data
+randn('state', 0); rand('state', 0);
+gmix = gmm(2, 2, 'spherical');
+ndat1 = 20; ndat2 = 20; ndata = ndat1+ndat2;
+gmix.centres = [0.3 0.3; 0.7 0.7];
+gmix.covars = [0.01 0.01];
+x = gmmsamp(gmix, ndata);
+
+h = figure;
+hd = plot(x(:, 1), x(:, 2), '.g', 'markersize', 30);
+hold on; axis([0 1 0 1]); axis square; set(gca, 'box', 'on');
+ht = text(0.5, 1.05, 'Data', 'horizontalalignment', 'center');
+disp(' ');
+disp('Press any key to continue.')
+pause; clc;
+
+disp('We next create and initialize a mixture model consisting of a mixture')
+disp('of 2 Gaussians having ''spherical'' covariance matrices, using the')
+disp('function GMM. The Gaussian components can be displayed on the same')
+disp('plot as the data by drawing a contour of constant probability density')
+disp('for each component having radius equal to the corresponding standard')
+disp('deviation. Component 1 is coloured red and component 2 is coloured')
+disp('blue.')
+disp(' ')
+disp('Note that a particulary poor choice of initial parameters has been')
+disp('made in order to illustrate more effectively the operation of the')
+disp('EM algorithm.')
+disp(' ')
+disp('Press any key to see the initial configuration of the mixture model.')
+pause;
+
+% Set up mixture model
+ncentres = 2; input_dim = 2;
+mix = gmm(input_dim, ncentres, 'spherical');
+
+% Initialise the mixture model
+mix.centres = [0.2 0.8; 0.8, 0.2];
+mix.covars = [0.01 0.01];
+
+% Plot the initial model
+ncirc = 30; theta = linspace(0, 2*pi, ncirc);
+xs = cos(theta); ys = sin(theta);
+xvals = mix.centres(:, 1)*ones(1,ncirc) + sqrt(mix.covars')*xs;
+yvals = mix.centres(:, 2)*ones(1,ncirc) + sqrt(mix.covars')*ys;
+hc(1)=line(xvals(1,:), yvals(1,:), 'color', 'r');
+hc(2)=line(xvals(2,:), yvals(2,:), 'color', 'b');
+set(ht, 'string', 'Initial Configuration');
+figure(h);
+disp(' ')
+disp('Press any key to continue');
+pause; clc;
+
+disp('Now we adapt the parameters of the mixture model iteratively using the')
+disp('EM algorithm. Each cycle of the EM algorithm consists of an E-step')
+disp('followed by an M-step. We start with the E-step, which involves the')
+disp('evaluation of the posterior probabilities (responsibilities) which the')
+disp('two components have for each of the data points.')
+disp(' ')
+disp('Since we have labelled the two components using the colours red and')
+disp('blue, a convenient way to indicate the value of a posterior')
+disp('probability for a given data point is to colour the point using a')
+disp('scale ranging from pure red (corresponding to a posterior probability')
+disp('of 1.0 for the red component and 0.0 for the blue component) through')
+disp('to pure blue.')
+disp(' ')
+disp('Press any key to see the result of applying the first E-step.')
+pause;
+
+% Initial E-step.
+set(ht, 'string', 'E-step');
+post = gmmpost(mix, x);
+dcols = [post(:,1), zeros(ndata, 1), post(:,2)];
+delete(hd);
+for i = 1 : ndata
+ hd(i) = plot(x(i, 1), x(i, 2), 'color', dcols(i,:), ...
+ 'marker', '.', 'markersize', 30);
+end
+figure(h);
+
+disp(' ');
+disp('Press any key to continue')
+pause; clc;
+
+disp('Next we perform the corresponding M-step. This involves replacing the')
+disp('centres of the component Gaussians by the corresponding weighted means')
+disp('of the data. Thus the centre of the red component is replaced by the')
+disp('mean of the data set, in which each data point is weighted according to')
+disp('the amount of red ink (corresponding to the responsibility of')
+disp('component 1 for explaining that data point). The variances and mixing')
+disp('proportions of the two components are similarly re-estimated.')
+disp(' ')
+disp('Press any key to see the result of applying the first M-step.')
+pause;
+
+% M-step.
+set(ht, 'string', 'M-step');
+options = foptions;
+options(14) = 1; % A single iteration
+options(1) = -1; % Switch off all messages, including warning
+mix = gmmem(mix, x, options);
+delete(hc);
+xvals = mix.centres(:, 1)*ones(1,ncirc) + sqrt(mix.covars')*xs;
+yvals = mix.centres(:, 2)*ones(1,ncirc) + sqrt(mix.covars')*ys;
+hc(1)=line(xvals(1,:), yvals(1,:), 'color', 'r');
+hc(2)=line(xvals(2,:), yvals(2,:), 'color', 'b');
+figure(h);
+disp(' ')
+disp('Press any key to continue')
+pause; clc;
+
+disp('We can continue making alternate E and M steps until the changes in')
+disp('the log likelihood at each cycle become sufficiently small.')
+disp(' ')
+disp('Press any key to see an animation of a further 9 EM cycles.')
+pause;
+figure(h);
+
+% Loop over EM iterations.
+numiters = 9;
+for n = 1 : numiters
+
+ set(ht, 'string', 'E-step');
+ post = gmmpost(mix, x);
+ dcols = [post(:,1), zeros(ndata, 1), post(:,2)];
+ delete(hd);
+ for i = 1 : ndata
+ hd(i) = plot(x(i, 1), x(i, 2), 'color', dcols(i,:), ...
+ 'marker', '.', 'markersize', 30);
+ end
+ pause(1)
+
+ set(ht, 'string', 'M-step');
+ [mix, options] = gmmem(mix, x, options);
+ fprintf(1, 'Cycle %4d Error %11.6f\n', n, options(8));
+ delete(hc);
+ xvals = mix.centres(:, 1)*ones(1,ncirc) + sqrt(mix.covars')*xs;
+ yvals = mix.centres(:, 2)*ones(1,ncirc) + sqrt(mix.covars')*ys;
+ hc(1)=line(xvals(1,:), yvals(1,:), 'color', 'r');
+ hc(2)=line(xvals(2,:), yvals(2,:), 'color', 'b');
+ pause(1)
+
+end
+
+disp(' ');
+disp('Press any key to end.')
+pause; clc; close(h); clear all
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgmm2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgmm2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,152 @@
+%DEMGMM2 Demonstrate density modelling with a Gaussian mixture model.
+%
+% Description
+% The problem consists of modelling data generated by a mixture of
+% three Gaussians in 2 dimensions. The priors are 0.3, 0.5 and 0.2;
+% the centres are (2, 3.5), (0, 0) and (0,2); the variances are 0.2,
+% 0.5 and 1.0. The first figure contains a scatter plot of the data.
+%
+% A Gaussian mixture model with three components is trained using EM.
+% The parameter vector is printed before training and after training.
+% The user should press any key to continue at these points. The
+% parameter vector consists of priors (the first column), centres (given as
+% (x, y) pairs as the next two columns), and variances (the last
+% column).
+%
+% The second figure is a 3 dimensional view of the density function,
+% while the third shows the 1-standard deviation circles for the three
+% components of the mixture model.
+%
+% See also
+% GMM, GMMINIT, GMMEM, GMMPROB, GMMUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Generate the data
+% Fix seeds for reproducible results
+randn('state', 42);
+rand('state', 42);
+
+ndata = 500;
+[data, datac, datap, datasd] = dem2ddat(ndata);
+
+clc
+disp('This demonstration illustrates the use of a Gaussian mixture model')
+disp('to approximate the unconditional probability density of data in')
+disp('a two-dimensional space. We begin by generating the data from')
+disp('a mixture of three Gaussians and plotting it.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+fh1 = figure;
+plot(data(:, 1), data(:, 2), 'o')
+set(gca, 'Box', 'on')
+% Set up mixture model
+ncentres = 3;
+input_dim = 2;
+mix = gmm(input_dim, ncentres, 'spherical');
+
+options = foptions;
+options(14) = 5; % Just use 5 iterations of k-means in initialisation
+% Initialise the model parameters from the data
+mix = gmminit(mix, data, options);
+
+clc
+disp('The data is drawn from a mixture with parameters')
+disp(' Priors Centres Variances')
+disp([datap' datac (datasd.^2)'])
+disp(' ')
+disp('The mixture model has three components and spherical covariance')
+disp('matrices. The model parameters after initialisation using the')
+disp('k-means algorithm are as follows')
+% Print out model
+disp(' Priors Centres Variances')
+disp([mix.priors' mix.centres mix.covars'])
+disp('Press any key to continue')
+pause
+
+% Set up vector of options for EM trainer
+options = zeros(1, 18);
+options(1) = 1; % Prints out error values.
+options(14) = 10; % Max. Number of iterations.
+
+disp('We now train the model using the EM algorithm for 10 iterations')
+disp(' ')
+disp('Press any key to continue')
+pause
+[mix, options, errlog] = gmmem(mix, data, options);
+
+% Print out model
+disp(' ')
+disp('The trained model has parameters ')
+disp(' Priors Centres Variances')
+disp([mix.priors' mix.centres mix.covars'])
+disp('Note the close correspondence between these parameters and those')
+disp('of the distribution used to generate the data, which are repeated here.')
+disp(' Priors Centres Variances')
+disp([datap' datac (datasd.^2)'])
+disp(' ')
+disp('Press any key to continue')
+pause
+
+clc
+disp('We now plot the density given by the mixture model as a surface plot')
+disp(' ')
+disp('Press any key to continue')
+pause
+% Plot the result
+x = -4.0:0.2:5.0;
+y = -4.0:0.2:5.0;
+[X, Y] = meshgrid(x,y);
+X = X(:);
+Y = Y(:);
+grid = [X Y];
+Z = gmmprob(mix, grid);
+Z = reshape(Z, length(x), length(y));
+c = mesh(x, y, Z);
+hold on
+title('Surface plot of probability density')
+hold off
+
+clc
+disp('The final plot shows the centres and widths, given by one standard')
+disp('deviation, of the three components of the mixture model.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Try to calculate a sensible position for the second figure, below the first
+fig1_pos = get(fh1, 'Position');
+fig2_pos = fig1_pos;
+fig2_pos(2) = fig2_pos(2) - fig1_pos(4);
+fh2 = figure;
+set(fh2, 'Position', fig2_pos)
+
+hp1 = plot(data(:, 1), data(:, 2), 'bo');
+axis('equal');
+hold on
+hp2 = plot(mix.centres(:, 1), mix.centres(:,2), 'g+');
+set(hp2, 'MarkerSize', 10);
+set(hp2, 'LineWidth', 3);
+
+title('Plot of data and mixture centres')
+angles = 0:pi/30:2*pi;
+for i = 1 : mix.ncentres
+ x_circle = mix.centres(i,1)*ones(1, length(angles)) + ...
+ sqrt(mix.covars(i))*cos(angles);
+ y_circle = mix.centres(i,2)*ones(1, length(angles)) + ...
+ sqrt(mix.covars(i))*sin(angles);
+ plot(x_circle, y_circle, 'r')
+end
+hold off
+disp('Note how the data cluster positions and widths are captured by')
+disp('the mixture model.')
+disp(' ')
+disp('Press any key to end.')
+pause
+
+close(fh1);
+close(fh2);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgmm3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgmm3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,192 @@
+%DEMGMM3 Demonstrate density modelling with a Gaussian mixture model.
+%
+% Description
+% The problem consists of modelling data generated by a mixture of
+% three Gaussians in 2 dimensions with a mixture model using diagonal
+% covariance matrices. The priors are 0.3, 0.5 and 0.2; the centres
+% are (2, 3.5), (0, 0) and (0,2); the covariances are all axis aligned
+% (0.16, 0.64), (0.25, 1) and the identity matrix. The first figure
+% contains a scatter plot of the data.
+%
+% A Gaussian mixture model with three components is trained using EM.
+% The parameter vector is printed before training and after training.
+% The user should press any key to continue at these points. The
+% parameter vector consists of priors (the first column), and centres (given
+% as (x, y) pairs as the next two columns). The diagonal entries of
+% the covariance matrices are printed separately.
+%
+% The second figure is a 3 dimensional view of the density function,
+% while the third shows the axes of the 1-standard deviation circles
+% for the three components of the mixture model.
+%
+% See also
+% GMM, GMMINIT, GMMEM, GMMPROB, GMMUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Generate the data
+ndata = 500;
+
+% Fix the seeds for reproducible results
+randn('state', 42);
+rand('state', 42);
+data = randn(ndata, 2);
+prior = [0.3 0.5 0.2];
+% Mixture model swaps clusters 1 and 3
+datap = [0.2 0.5 0.3];
+datac = [0 2; 0 0; 2 3.5];
+datacov = [1 1;1 0.25; 0.4*0.4 0.8*0.8];
+data1 = data(1:prior(1)*ndata,:);
+data2 = data(prior(1)*ndata+1:(prior(2)+prior(1))*ndata, :);
+data3 = data((prior(1)+prior(2))*ndata +1:ndata, :);
+
+% First cluster has axis aligned variance and centre (2, 3.5)
+data1(:, 1) = data1(:, 1)*0.4 + 2.0;
+data1(:, 2) = data1(:, 2)*0.8 + 3.5;
+
+% Second cluster has axis aligned variance and centre (0, 0)
+data2(:,2) = data2(:, 2)*0.5;
+
+% Third cluster is at (0,2) with identity matrix for covariance
+data3 = data3 + repmat([0 2], prior(3)*ndata, 1);
+
+% Put the dataset together again
+data = [data1; data2; data3];
+
+clc
+disp('This demonstration illustrates the use of a Gaussian mixture model')
+disp('with diagonal covariance matrices to approximate the unconditional')
+disp('probability density of data in a two-dimensional space.')
+disp('We begin by generating the data from a mixture of three Gaussians')
+disp('with axis aligned covariance structure and plotting it.')
+disp(' ')
+disp('The first cluster has centre (0, 2).')
+disp('The second cluster has centre (0, 0).')
+disp('The third cluster has centre (2, 3.5).')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+fh1 = figure;
+plot(data(:, 1), data(:, 2), 'o')
+set(gca, 'Box', 'on')
+
+% Set up mixture model
+ncentres = 3;
+input_dim = 2;
+mix = gmm(input_dim, ncentres, 'diag');
+
+options = foptions;
+options(14) = 5; % Just use 5 iterations of k-means in initialisation
+% Initialise the model parameters from the data
+mix = gmminit(mix, data, options);
+
+% Print out model
+disp('The mixture model has three components and diagonal covariance')
+disp('matrices. The model parameters after initialisation using the')
+disp('k-means algorithm are as follows')
+disp(' Priors Centres')
+disp([mix.priors' mix.centres])
+disp('Covariance diagonals are')
+disp(mix.covars)
+disp('Press any key to continue.')
+pause
+
+% Set up vector of options for EM trainer
+options = zeros(1, 18);
+options(1) = 1; % Prints out error values.
+options(14) = 20; % Number of iterations.
+
+disp('We now train the model using the EM algorithm for 20 iterations.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+[mix, options, errlog] = gmmem(mix, data, options);
+
+% Print out model
+disp(' ')
+disp('The trained model has priors and centres:')
+disp(' Priors Centres')
+disp([mix.priors' mix.centres])
+disp('The data generator has priors and centres')
+disp(' Priors Centres')
+disp([datap' datac])
+disp('Model covariance diagonals are')
+disp(mix.covars)
+disp('Data generator covariance diagonals are')
+disp(datacov)
+disp('Note the close correspondence between these parameters and those')
+disp('of the distribution used to generate the data.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+clc
+disp('We now plot the density given by the mixture model as a surface plot.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Plot the result
+x = -4.0:0.2:5.0;
+y = -4.0:0.2:5.0;
+[X, Y] = meshgrid(x,y);
+X = X(:);
+Y = Y(:);
+grid = [X Y];
+Z = gmmprob(mix, grid);
+Z = reshape(Z, length(x), length(y));
+c = mesh(x, y, Z);
+hold on
+title('Surface plot of probability density')
+hold off
+drawnow
+
+clc
+disp('The final plot shows the centres and widths, given by one standard')
+disp('deviation, of the three components of the mixture model. The axes')
+disp('of the ellipses of constant density are shown.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Try to calculate a sensible position for the second figure, below the first
+fig1_pos = get(fh1, 'Position');
+fig2_pos = fig1_pos;
+fig2_pos(2) = fig2_pos(2) - fig1_pos(4);
+fh2 = figure('Position', fig2_pos);
+
+h = plot(data(:, 1), data(:, 2), 'bo');
+hold on
+axis('equal');
+title('Plot of data and covariances')
+for i = 1:ncentres
+ v = [1 0];
+ for j = 1:2
+ start=mix.centres(i,:)-sqrt(mix.covars(i,:).*v);
+ endpt=mix.centres(i,:)+sqrt(mix.covars(i,:).*v);
+ linex = [start(1) endpt(1)];
+ liney = [start(2) endpt(2)];
+ line(linex, liney, 'Color', 'k', 'LineWidth', 3)
+ v = [0 1];
+ end
+ % Plot ellipses of one standard deviation
+ theta = 0:0.02:2*pi;
+ x = sqrt(mix.covars(i,1))*cos(theta) + mix.centres(i,1);
+ y = sqrt(mix.covars(i,2))*sin(theta) + mix.centres(i,2);
+ plot(x, y, 'r-');
+end
+hold off
+
+disp('Note how the data cluster positions and widths are captured by')
+disp('the mixture model.')
+disp(' ')
+disp('Press any key to end.')
+pause
+
+close(fh1);
+close(fh2);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgmm4.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgmm4.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,210 @@
+%DEMGMM4 Demonstrate density modelling with a Gaussian mixture model.
+%
+% Description
+% The problem consists of modelling data generated by a mixture of
+% three Gaussians in 2 dimensions with a mixture model using full
+% covariance matrices. The priors are 0.3, 0.5 and 0.2; the centres
+% are (2, 3.5), (0, 0) and (0,2); the variances are (0.16, 0.64) axis
+% aligned, (0.25, 1) rotated by 30 degrees and the identity matrix. The
+% first figure contains a scatter plot of the data.
+%
+% A Gaussian mixture model with three components is trained using EM.
+% The parameter vector is printed before training and after training.
+% The user should press any key to continue at these points. The
+% parameter vector consists of priors (the first column), and centres (given
+% as (x, y) pairs as the next two columns). The covariance matrices
+% are printed separately.
+%
+% The second figure is a 3 dimensional view of the density function,
+% while the third shows the axes of the 1-standard deviation ellipses
+% for the three components of the mixture model.
+%
+% See also
+% GMM, GMMINIT, GMMEM, GMMPROB, GMMUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the data
+
+ndata = 500;
+
+% Fix the seeds for reproducible results
+randn('state', 42);
+rand('state', 42);
+data = randn(ndata, 2);
+prior = [0.3 0.5 0.2];
+% Mixture model swaps clusters 1 and 3
+datap = [0.2 0.5 0.3];
+datac = [0 2; 0 0; 2 3.5];
+datacov = repmat(eye(2), [1 1 3]);
+data1 = data(1:prior(1)*ndata,:);
+data2 = data(prior(1)*ndata+1:(prior(2)+prior(1))*ndata, :);
+data3 = data((prior(1)+prior(2))*ndata +1:ndata, :);
+
+% First cluster has axis aligned variance and centre (2, 3.5)
+data1(:, 1) = data1(:, 1)*0.4 + 2.0;
+data1(:, 2) = data1(:, 2)*0.8 + 3.5;
+datacov(:, :, 3) = [0.4*0.4 0; 0 0.8*0.8];
+
+% Second cluster has variance axes rotated by 30 degrees and centre (0, 0)
+rotn = [cos(pi/6) -sin(pi/6); sin(pi/6) cos(pi/6)];
+data2(:,1) = data2(:, 1)*0.5;
+data2 = data2*rotn;
+datacov(:, :, 2) = rotn' * [0.25 0; 0 1] * rotn;
+
+% Third cluster is at (0,2)
+data3 = data3 + repmat([0 2], prior(3)*ndata, 1);
+
+% Put the dataset together again
+data = [data1; data2; data3];
+
+clc
+disp('This demonstration illustrates the use of a Gaussian mixture model')
+disp('with full covariance matrices to approximate the unconditional ')
+disp('probability density of data in a two-dimensional space.')
+disp('We begin by generating the data from a mixture of three Gaussians and')
+disp('plotting it.')
+disp(' ')
+disp('The first cluster has axis aligned variance and centre (0, 2).')
+disp('The second cluster has variance axes rotated by 30 degrees')
+disp('and centre (0, 0). The third cluster has unit variance and centre')
+disp('(2, 3.5).')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh1 = figure;
+plot(data(:, 1), data(:, 2), 'o')
+set(gca, 'Box', 'on')
+
+% Set up mixture model
+ncentres = 3;
+input_dim = 2;
+mix = gmm(input_dim, ncentres, 'full');
+
+% Initialise the model parameters from the data
+options = foptions;
+options(14) = 5; % Just use 5 iterations of k-means in initialisation
+mix = gmminit(mix, data, options);
+
+% Print out model
+clc
+disp('The mixture model has three components and full covariance')
+disp('matrices. The model parameters after initialisation using the')
+disp('k-means algorithm are as follows')
+disp(' Priors Centres')
+disp([mix.priors' mix.centres])
+disp('Covariance matrices are')
+disp(mix.covars)
+disp('Press any key to continue.')
+pause
+
+% Set up vector of options for EM trainer
+options = zeros(1, 18);
+options(1) = 1; % Prints out error values.
+options(14) = 50; % Number of iterations.
+
+disp('We now train the model using the EM algorithm for 50 iterations.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+[mix, options, errlog] = gmmem(mix, data, options);
+
+% Print out model
+disp(' ')
+disp('The trained model has priors and centres:')
+disp(' Priors Centres')
+disp([mix.priors' mix.centres])
+disp('The data generator has priors and centres')
+disp(' Priors Centres')
+disp([datap' datac])
+disp('Model covariance matrices are')
+disp(mix.covars(:, :, 1))
+disp(mix.covars(:, :, 2))
+disp(mix.covars(:, :, 3))
+disp('Data generator covariance matrices are')
+disp(datacov(:, :, 1))
+disp(datacov(:, :, 2))
+disp(datacov(:, :, 3))
+disp('Note the close correspondence between these parameters and those')
+disp('of the distribution used to generate the data. The match for')
+disp('covariance matrices is not that close, but would be improved with')
+disp('more iterations of the training algorithm.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+clc
+disp('We now plot the density given by the mixture model as a surface plot.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Plot the result
+x = -4.0:0.2:5.0;
+y = -4.0:0.2:5.0;
+[X, Y] = meshgrid(x,y);
+X = X(:);
+Y = Y(:);
+grid = [X Y];
+Z = gmmprob(mix, grid);
+Z = reshape(Z, length(x), length(y));
+c = mesh(x, y, Z);
+hold on
+title('Surface plot of probability density')
+hold off
+drawnow
+
+clc
+disp('The final plot shows the centres and widths, given by one standard')
+disp('deviation, of the three components of the mixture model. The axes')
+disp('of the ellipses of constant density are shown.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Try to calculate a sensible position for the second figure, below the first
+fig1_pos = get(fh1, 'Position');
+fig2_pos = fig1_pos;
+fig2_pos(2) = fig2_pos(2) - fig1_pos(4) - 30;
+fh2 = figure('Position', fig2_pos);
+
+h3 = plot(data(:, 1), data(:, 2), 'bo');
+axis equal;
+hold on
+title('Plot of data and covariances')
+for i = 1:ncentres
+ [v,d] = eig(mix.covars(:,:,i));
+ for j = 1:2
+ % Ensure that eigenvector has unit length
+ v(:,j) = v(:,j)/norm(v(:,j));
+ start=mix.centres(i,:)-sqrt(d(j,j))*(v(:,j)');
+ endpt=mix.centres(i,:)+sqrt(d(j,j))*(v(:,j)');
+ linex = [start(1) endpt(1)];
+ liney = [start(2) endpt(2)];
+ line(linex, liney, 'Color', 'k', 'LineWidth', 3)
+ end
+ % Plot ellipses of one standard deviation
+ theta = 0:0.02:2*pi;
+ x = sqrt(d(1,1))*cos(theta);
+ y = sqrt(d(2,2))*sin(theta);
+ % Rotate ellipse axes
+ ellipse = (v*([x; y]))';
+ % Adjust centre
+ ellipse = ellipse + ones(length(theta), 1)*mix.centres(i,:);
+ plot(ellipse(:,1), ellipse(:,2), 'r-');
+end
+hold off
+
+disp('Note how the data cluster positions and widths are captured by')
+disp('the mixture model.')
+disp(' ')
+disp('Press any key to end.')
+pause
+
+close(fh1);
+close(fh2);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgmm5.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgmm5.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,147 @@
+%DEMGMM5 Demonstrate density modelling with a PPCA mixture model.
+%
+% Description
+% The problem consists of modelling data generated by a mixture of
+% three Gaussians in 2 dimensions with a mixture model using a PPCA
+% covariance structure. The priors are 0.3, 0.5 and 0.2; the centres
+% are (2, 3.5), (0, 0) and (0,2); the variances are (0.01, 0.64) axis
+% aligned, (0.04, 1) rotated by 30 degrees and (1, 0.01) axis aligned.
+% A scatter plot of the data is shown.
+%
+% A mixture model with three one-dimensional PPCA components is trained
+% using EM. The parameter vector is printed before training and after
+% training. The parameter vector consists of priors (the first column), and
+% centres (given as (x, y) pairs as the next two columns).
+%
+% The figure shows the data scatter plot overlaid with the
+% 1-standard deviation ellipses for the three components of the
+% mixture model, together with a line marking one standard
+% deviation along the principal component of each mixture
+% model component.
+%
+% See also
+% GMM, GMMINIT, GMMEM, GMMPROB, PPCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+ndata = 500;
+data = randn(ndata, 2);
+prior = [0.3 0.5 0.2];
+% Mixture model swaps clusters 1 and 3
+datap = [0.2 0.5 0.3];
+datac = [0 2; 0 0; 2 3.5];
+datacov = repmat(eye(2), [1 1 3]);
+data1 = data(1:prior(1)*ndata,:);
+data2 = data(prior(1)*ndata+1:(prior(2)+prior(1))*ndata, :);
+data3 = data((prior(1)+prior(2))*ndata +1:ndata, :);
+
+% First cluster has axis aligned variance and centre (2, 3.5)
+data1(:, 1) = data1(:, 1)*0.1 + 2.0;
+data1(:, 2) = data1(:, 2)*0.8 + 3.5;
+datacov(:, :, 3) = [0.1*0.1 0; 0 0.8*0.8];
+
+% Second cluster has variance axes rotated by 30 degrees and centre (0, 0)
+rotn = [cos(pi/6) -sin(pi/6); sin(pi/6) cos(pi/6)];
+data2(:,1) = data2(:, 1)*0.2;
+data2 = data2*rotn;
+datacov(:, :, 2) = rotn' * [0.04 0; 0 1] * rotn;
+
+% Third cluster is at (0,2)
+data3(:, 2) = data3(:, 2)*0.1;
+data3 = data3 + repmat([0 2], prior(3)*ndata, 1);
+
+% Put the dataset together again
+data = [data1; data2; data3];
+
+ndata = 100; % Number of data points.
+noise = 0.2; % Standard deviation of noise distribution.
+x = [0:1/(2*(ndata - 1)):0.5]';
+randn('state', 1);
+rand('state', 1);
+t = sin(2*pi*x) + noise*randn(ndata, 1);
+
+% Fit three one-dimensional PPCA models
+ncentres = 3;
+ppca_dim = 1;
+
+clc
+disp('This demonstration illustrates the use of a Gaussian mixture model')
+disp('with a probabilistic PCA covariance structure to approximate the')
+disp('unconditional probability density of data in a two-dimensional space.')
+disp('We begin by generating the data from a mixture of three Gaussians and')
+disp('plotting it.')
+disp(' ')
+disp('The first cluster has axis aligned variance and centre (0, 2).')
+disp('The variance parallel to the x-axis is significantly greater')
+disp('than that parallel to the y-axis.')
+disp('The second cluster has variance axes rotated by 30 degrees')
+disp('and centre (0, 0). The third cluster has significant variance')
+disp('parallel to the y-axis and centre (2, 3.5).')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh1 = figure;
+plot(data(:, 1), data(:, 2), 'o')
+set(gca, 'Box', 'on')
+axis equal
+hold on
+
+mix = gmm(2, ncentres, 'ppca', ppca_dim);
+options = foptions;
+options(14) = 10;
+options(1) = -1; % Switch off all warnings
+
+% Just use 10 iterations of k-means in initialisation
+% Initialise the model parameters from the data
+mix = gmminit(mix, data, options);
+disp('The mixture model has three components with 1-dimensional')
+disp('PPCA subspaces. The model parameters after initialisation using')
+disp('the k-means algorithm are as follows')
+disp(' Priors Centres')
+disp([mix.priors' mix.centres])
+disp(' ')
+disp('Press any key to continue')
+pause
+
+options(1) = 1; % Prints out error values.
+options(14) = 30; % Number of iterations.
+
+disp('We now train the model using the EM algorithm for up to 30 iterations.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+[mix, options, errlog] = gmmem(mix, data, options);
+disp('The trained model has priors and centres:')
+disp(' Priors Centres')
+disp([mix.priors' mix.centres])
+
+% Now plot the result
+for i = 1:ncentres
+ % Plot the PC vectors
+ v = mix.U(:,:,i);
+ start=mix.centres(i,:)-sqrt(mix.lambda(i))*(v');
+ endpt=mix.centres(i,:)+sqrt(mix.lambda(i))*(v');
+ linex = [start(1) endpt(1)];
+ liney = [start(2) endpt(2)];
+ line(linex, liney, 'Color', 'k', 'LineWidth', 3)
+ % Plot ellipses of one standard deviation
+ theta = 0:0.02:2*pi;
+ x = sqrt(mix.lambda(i))*cos(theta);
+ y = sqrt(mix.covars(i))*sin(theta);
+ % Rotate ellipse axes
+ rot_matrix = [v(1) -v(2); v(2) v(1)];
+ ellipse = (rot_matrix*([x; y]))';
+ % Adjust centre
+ ellipse = ellipse + ones(length(theta), 1)*mix.centres(i,:);
+ plot(ellipse(:,1), ellipse(:,2), 'r-')
+end
+
+disp(' ')
+disp('Press any key to exit')
+pause
+close (fh1);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,160 @@
+%DEMGP Demonstrate simple regression using a Gaussian Process.
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T. The values in X are chosen in two separated clusters and the
+% target data is generated by computing SIN(2*PI*X) and adding Gaussian
+% noise. Two Gaussian Processes, each with different covariance
+% functions are trained by optimising the hyperparameters using the
+% scaled conjugate gradient algorithm. The final predictions are
+% plotted together with 2 standard deviation error bars.
+%
+% See also
+% GP, GPERR, GPFWD, GPGRAD, GPINIT, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Find out if flops is available (i.e. pre-version 6 Matlab)
+v = version;
+if (str2num(strtok(v, '.')) >= 6)
+ flops_works = logical(0); % flops was removed in MATLAB 6 and later
+else
+ flops_works = logical(1);
+end
+
+randn('state', 42); % fix the seed for reproducible results
+x = [0.1 0.15 0.2 0.25 0.65 0.7 0.75 0.8 0.85 0.9]'; % two separated clusters of inputs
+ndata = length(x);
+t = sin(2*pi*x) + 0.05*randn(ndata, 1); % noisy sine targets
+
+xtest = linspace(0, 1, 50)';
+
+clc
+disp('This demonstration illustrates the use of a Gaussian Process')
+disp('model for regression problems. The data is generated from a noisy')
+disp('sine function.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+flops(0); % NOTE(review): flops no longer exists in MATLAB >= 6, so this unguarded call can error there
+% Initialise the parameters.
+net = gp(1, 'sqexp');
+prior.pr_mean = 0;
+prior.pr_var = 1;
+net = gpinit(net, x, t, prior);
+
+clc
+disp('The first GP uses the squared exponential covariance function.')
+disp('The hyperparameters are initialised by sampling from a Gaussian with a')
+disp(['mean of ', num2str(prior.pr_mean), ' and variance ', ...
+ num2str(prior.pr_var), '.'])
+disp('After initializing the network, we train it using the scaled conjugate')
+disp('gradients algorithm for 20 cycles.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+% Now train to find the hyperparameters.
+options = foptions;
+options(1) = 1; % Display training error values
+options(14) = 20;
+flops(0) % NOTE(review): unguarded on MATLAB >= 6 (see note above)
+[net, options] = netopt(net, options, x, t, 'scg');
+if flops_works
+ sflops = flops;
+end
+
+disp('The second GP uses the rational quadratic covariance function.')
+disp('The hyperparameters are initialised by sampling from a Gaussian with a')
+disp(['mean of ', num2str(prior.pr_mean), ' and variance ', num2str(prior.pr_var)])
+disp('After initializing the network, we train it using the scaled conjugate')
+disp('gradients algorithm for 20 cycles.')
+disp(' ')
+disp('Press any key to continue')
+pause
+flops(0)
+net2 = gp(1, 'ratquad');
+net2 = gpinit(net2, x, t, prior);
+flops(0)
+[net2, options] = netopt(net2, options, x, t, 'scg');
+if flops_works
+ rflops = flops;
+end
+
+disp(' ')
+disp('Press any key to continue')
+disp(' ')
+pause
+clc
+
+fprintf(1, 'For squared exponential covariance function,');
+if flops_works
+ fprintf(1, 'flops = %d', sflops);
+end
+fprintf(1, '\nfinal hyperparameters:\n')
+format_string = strcat(' bias:\t\t\t%10.6f\n noise:\t\t%10.6f\n', ...
+ ' inverse lengthscale:\t%10.6f\n vertical scale:\t%10.6f\n');
+fprintf(1, format_string, ...
+ exp(net.bias), exp(net.noise), exp(net.inweights(1)), exp(net.fpar(1)));
+fprintf(1, '\n\nFor rational quadratic covariance function,');
+if flops_works
+ fprintf(1, 'flops = %d', rflops);
+end
+fprintf(1, '\nfinal hyperparameters:\n')
+format_string = [format_string ' cov decay order:\t%10.6f\n'];
+fprintf(1, format_string, ...
+ exp(net2.bias), exp(net2.noise), exp(net2.inweights(1)), ...
+ exp(net2.fpar(1)), exp(net2.fpar(2)));
+disp(' ')
+disp('Press any key to continue')
+pause
+
+disp(' ')
+disp('Now we plot the data, underlying function, model outputs and two')
+disp('standard deviation error bars on a single graph to compare the results.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+cn = gpcovar(net, x); % covariance matrix over the training inputs
+cninv = inv(cn); % NOTE(review): explicit inverse; a Cholesky solve would be more stable
+[ytest, sigsq] = gpfwd(net, xtest, cninv); % predictive mean and variance at the test inputs
+sig = sqrt(sigsq);
+
+fh1 = figure;
+hold on
+plot(x, t, 'ok');
+xlabel('Input')
+ylabel('Target')
+fplot('sin(2*pi*x)', [0 1], '--m');
+plot(xtest, ytest, '-k');
+plot(xtest, ytest+(2*sig), '-b', xtest, ytest-(2*sig), '-b');
+axis([0 1 -1.5 1.5]);
+title('Squared exponential covariance function')
+legend('data', 'function', 'GP', 'error bars');
+hold off
+
+cninv2 = inv(gpcovar(net2, x));
+[ytest2, sigsq2] = gpfwd(net2, xtest, cninv2);
+sig2 = sqrt(sigsq2);
+fh2 = figure;
+hold on
+plot(x, t, 'ok');
+xlabel('Input')
+ylabel('Target')
+fplot('sin(2*pi*x)', [0 1], '--m');
+plot(xtest, ytest2, '-k');
+plot(xtest, ytest2+(2*sig2), '-b', xtest, ytest2-(2*sig2), '-b');
+axis([0 1 -1.5 1.5]);
+title('Rational quadratic covariance function')
+legend('data', 'function', 'GP', 'error bars');
+hold off
+
+disp(' ')
+disp('Press any key to end.')
+pause
+close(fh1);
+close(fh2);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgpard.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgpard.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,126 @@
+%DEMGPARD Demonstrate ARD using a Gaussian Process.
+%
+% Description
+% The data consists of three input variables X1, X2 and X3, and one
+% target variable T. The target data is generated by computing
+% SIN(2*PI*X1) and adding Gaussian noise, x2 is a copy of x1 with a
+% higher level of added noise, and x3 is sampled randomly from a
+% Gaussian distribution. A Gaussian Process, is trained by optimising
+% the hyperparameters using the scaled conjugate gradient algorithm.
+% The final values of the hyperparameters show that the model
+% successfully identifies the importance of each input.
+%
+% See also
+% DEMGP, GP, GPERR, GPFWD, GPGRAD, GPINIT, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc;
+randn('state', 1729);
+rand('state', 1729);
+disp('This demonstration illustrates the technique of automatic relevance')
+disp('determination (ARD) using a Gaussian Process.')
+disp(' ');
+disp('First, we set up a synthetic data set involving three input variables:')
+disp('x1 is sampled uniformly from the range (0,1) and has a low level of')
+disp('added Gaussian noise, x2 is a copy of x1 with a higher level of added')
+disp('noise, and x3 is sampled randomly from a Gaussian distribution. The')
+disp('single target variable is given by t = sin(2*pi*x1) with additive')
+disp('Gaussian noise. Thus x1 is very relevant for determining the target')
+disp('value, x2 is of some relevance, while x3 should in principle be')
+disp('irrelevant.')
+disp(' ');
+disp('Press any key to see a plot of t against x1.')
+pause;
+
+ndata = 100;
+x1 = rand(ndata, 1); % fully relevant input
+x2 = x1 + 0.05*randn(ndata, 1); % noisy copy of x1: partially relevant
+x3 = 0.5 + 0.5*randn(ndata, 1); % independent of the target: irrelevant
+x = [x1, x2, x3];
+t = sin(2*pi*x1) + 0.1*randn(ndata, 1);
+
+% Plot the data and the original function.
+h = figure;
+plotvals = linspace(0, 1, 200)';
+plot(x1, t, 'ob')
+hold on
+xlabel('Input x1')
+ylabel('Target')
+axis([0 1 -1.5 1.5])
+[fx, fy] = fplot('sin(2*pi*x)', [0 1]);
+plot(fx, fy, '-g', 'LineWidth', 2);
+legend('data', 'function');
+
+disp(' ');
+disp('Press any key to continue')
+pause; clc;
+
+disp('The Gaussian Process has a separate hyperparameter for each input.')
+disp('The hyperparameters are trained by error minimisation using the scaled.')
+disp('conjugate gradient optimiser.')
+disp(' ');
+disp('Press any key to create and train the model.')
+disp(' ');
+pause;
+
+net = gp(3, 'sqexp');
+% Initialise the parameters.
+prior.pr_mean = 0;
+prior.pr_var = 0.1;
+net = gpinit(net, x, t, prior);
+
+% Now train to find the hyperparameters.
+options = foptions;
+options(1) = 1;
+options(14) = 30;
+
+[net, options] = netopt(net, options, x, t, 'scg');
+
+rel = exp(net.inweights); % ARD inverse lengthscales: larger => more relevant input
+
+fprintf(1, ...
+ '\nFinal hyperparameters:\n\n bias:\t\t%10.6f\n noise:\t%10.6f\n', ...
+ exp(net.bias), exp(net.noise));
+fprintf(1, ' Vertical scale: %8.6f\n', exp(net.fpar(1)));
+fprintf(1, ' Input 1:\t%10.6f\n Input 2:\t%10.6f\n', ...
+ rel(1), rel(2));
+fprintf(1, ' Input 3:\t%10.6f\n\n', rel(3));
+disp(' ');
+disp('We see that the inverse lengthscale associated with')
+disp('input x1 is large, that of x2 has an intermediate value and the variance')
+disp('of weights associated with x3 is small.')
+disp(' ');
+disp('This implies that the Gaussian Process is giving greatest emphasis')
+disp('to x1 and least emphasis to x3, with intermediate emphasis on')
+disp('x2 in the covariance function.')
+disp(' ')
+disp('Since the target t is statistically independent of x3 we might')
+disp('expect the weights associated with this input would go to')
+disp('zero. However, for any finite data set there may be some chance')
+disp('correlation between x3 and t, and so the corresponding hyperparameter remains')
+disp('finite.')
+disp('Press any key to continue.')
+pause
+
+disp('Finally, we plot the output of the Gaussian Process along the line')
+disp('x1 = x2 = x3, together with the true underlying function.')
+xt = linspace(0, 1, 50);
+xtest = [xt', xt', xt'];
+
+cn = gpcovar(net, x); % covariance matrix over the training inputs
+cninv = inv(cn); % NOTE(review): explicit inverse; a Cholesky solve would be more stable
+[ytest, sigsq] = gpfwd(net, xtest, cninv);
+sig = sqrt(sigsq);
+
+figure(h); hold on;
+plot(xt, ytest, '-k');
+plot(xt, ytest+(2*sig), '-b', xt, ytest-(2*sig), '-b');
+axis([0 1 -1.5 1.5]);
+fplot('sin(2*pi*x)', [0 1], '--m');
+
+disp(' ');
+disp('Press any key to end.')
+pause; clc; close(h); clear all
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgpot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgpot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function g = demgpot(x, mix)
+%DEMGPOT Computes the gradient of the negative log likelihood for a mixture model.
+%
+% Description
+% This function computes the gradient of the negative log of the
+% unconditional data density P(X) with respect to the coefficients of
+% the data vector X for a Gaussian mixture model. The data structure
+% MIX defines the mixture model, while the matrix X contains the data
+% vector as a row vector. Note the unusual order of the arguments: this
+% is so that the function can be used in DEMHMC1 directly for sampling
+% from the distribution P(X).
+%
+% See also
+% DEMHMC1, DEMMET1, DEMPOT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Computes the potential gradient
+
+temp = (ones(mix.ncentres,1)*x)-mix.centres; % x - mu_j for each centre j (ncentres x nin)
+temp = temp.*(gmmactiv(mix,x)'*ones(1, mix.nin)); % weight each row by the component activation p(x|j)
+% Assume spherical covariance structure
+if ~strcmp(mix.covar_type, 'spherical')
+ error('Spherical covariance only.')
+end
+temp = temp./(mix.covars'*ones(1, mix.nin)); % divide by the per-component (spherical) variances
+temp = temp.*(mix.priors'*ones(1, mix.nin)); % multiply by the mixing coefficients
+g = sum(temp, 1)/gmmprob(mix, x); % gradient of -log p(x): sum over components, normalised by p(x)
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgtm1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgtm1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,147 @@
+%DEMGTM1 Demonstrate EM for GTM.
+%
+% Description
+% This script demonstrates the use of the EM algorithm to fit a one-
+% dimensional GTM to a two-dimensional set of data using maximum
+% likelihood. The location and spread of the Gaussian kernels in the
+% data space is shown during training.
+%
+% See also
+% DEMGTM2, GTM, GTMEM, GTMPOST
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Demonstrates the GTM with a 2D target space and a 1D latent space.
+%
+% This script generates a simple data set in 2 dimensions,
+% with an intrinsic dimensionality of 1, and trains a GTM
+% with a 1-dimensional latent variable to model this data
+% set, visually illustrating the training process
+%
+% Synopsis: gtm_demo
+
+% Generate and plot a 2D data set
+
+data_min = 0.15;
+data_max = 3.05;
+T = [data_min:0.05:data_max]'; % uniformly spaced x values
+T = [T (T + 1.25*sin(2*T))]; % second column: non-linear function of the first
+fh1 = figure;
+plot(T(:,1), T(:,2), 'ro');
+axis([data_min-0.05 data_max+0.05 data_min-0.05 data_max+0.05]);
+clc;
+disp('This demonstration shows in detail how the EM algorithm works')
+disp('for training a GTM with a one dimensional latent space.')
+disp(' ')
+fprintf([...
+'The figure shows data generated by feeding a 1D uniform distribution\n', ...
+'(on the X-axis) through a non-linear function (y = x + 1.25*sin(2*x))\n', ...
+'\nPress any key to continue ...\n\n']);
+pause;
+
+% Generate a unit circle figure, to be used for plotting
+src = [0:(2*pi)/(20-1):2*pi]'; % 20 angles round the circle
+unitC = [sin(src) cos(src)];
+
+% Generate and plot (along with the data) an initial GTM model
+
+clc;
+num_latent_points = 20;
+num_rbf_centres = 5;
+
+net = gtm(1, num_latent_points, 2, num_rbf_centres, 'gaussian');
+
+options = zeros(1, 18); % bare options vector; only options(7) is set below
+options(7) = 1;
+net = gtminit(net, options, T, 'regular', num_latent_points, ...
+ num_rbf_centres);
+
+mix = gtmfwd(net);
+% Replot the figure
+hold off;
+plot(mix.centres(:,1), mix.centres(:,2), 'g');
+hold on;
+for i=1:num_latent_points
+ c = 2*unitC*sqrt(mix.covars(1)) + [ones(20,1)*mix.centres(i,1) ... % NOTE(review): hard-coded 20 assumes num_latent_points == 20
+ ones(num_latent_points,1)*mix.centres(i,2)];
+ fill(c(:,1), c(:,2), [0.8 1 0.8]);
+end
+plot(T(:,1), T(:,2), 'ro');
+plot(mix.centres(:,1), mix.centres(:,2), 'g+');
+plot(mix.centres(:,1), mix.centres(:,2), 'g');
+axis([data_min-0.05 data_max+0.05 data_min-0.05 data_max+0.05]);
+drawnow;
+title('Initial configuration');
+disp(' ')
+fprintf([...
+'The figure shows the starting point for the GTM, before the training.\n', ...
+'A discrete latent variable distribution of %d points in 1 dimension \n', ...
+'is mapped to the 1st principal component of the target data by an RBF.\n', ...
+'with %d basis functions. Each of the %d points defines the centre of\n', ...
+'a Gaussian in a Gaussian mixture, marked by the green ''+''-signs. The\n', ...
+'mixture components all have equal variance, illustrated by the filled\n', ...
+'circle around each ''+''-sign, the radii corresponding to 2 standard\n', ...
+'deviations. The ''+''-signs are connected with a line according to their\n', ...
+'corresponding ordering in latent space.\n\n', ...
+'Press any key to begin training ...\n\n'], num_latent_points, ...
+num_rbf_centres, num_latent_points);
+pause;
+
+figure(fh1);
+%%%% Train the GTM and plot it (along with the data) as training proceeds %%%%
+options = foptions;
+options(1) = -1; % Turn off all warning messages
+options(14) = 1; % one EM iteration per gtmem call, so progress can be plotted
+for j = 1:15
+ [net, options] = gtmem(net, T, options);
+ hold off;
+ mix = gtmfwd(net);
+ plot(mix.centres(:,1), mix.centres(:,2), 'g');
+ hold on;
+ for i=1:20 % NOTE(review): hard-coded 20 == num_latent_points
+ c = 2*unitC*sqrt(mix.covars(1)) + [ones(20,1)*mix.centres(i,1) ...
+ ones(20,1)*mix.centres(i,2)];
+ fill(c(:,1), c(:,2), [0.8 1.0 0.8]);
+ end
+ plot(T(:,1), T(:,2), 'ro');
+ plot(mix.centres(:,1), mix.centres(:,2), 'g+');
+ plot(mix.centres(:,1), mix.centres(:,2), 'g');
+ axis([0 3.5 0 3.5]);
+ title(['After ', int2str(j),' iterations of training.']);
+ drawnow;
+ if (j == 4)
+ fprintf([...
+'The GTM initially adapts relatively quickly - already after \n', ...
+'4 iterations of training, a rough fit is attained.\n\n', ...
+'Press any key to continue training ...\n\n']);
+pause;
+figure(fh1);
+ elseif (j == 8)
+ fprintf([...
+'After another 4 iterations of training: from now on further \n', ...
+'training only makes small changes to the mapping, which combined with \n', ...
+'decrements of the Gaussian mixture variance, optimize the fit in \n', ...
+'terms of likelihood.\n\n', ...
+'Press any key to continue training ...\n\n']);
+pause;
+figure(fh1);
+ else
+ pause(1);
+ end
+end
+
+clc;
+fprintf([...
+'After 15 iterations of training the GTM can be regarded as converged. \n', ...
+'Is has been adapted to fit the target data distribution as well \n', ... % NOTE(review): typo in displayed text ("Is has" -> "It has"); runtime string left unchanged
+'as possible, given prior smoothness constraints on the mapping. It \n', ...
+'captures the fact that the probabilty density is higher at the two \n', ...
+'bends of the curve, and lower towards its end points.\n\n']);
+disp(' ');
+disp('Press any key to exit.');
+pause;
+
+close(fh1);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demgtm2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demgtm2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,194 @@
+%DEMGTM2 Demonstrate GTM for visualisation.
+%
+% Description
+% This script demonstrates the use of a GTM with a two-dimensional
+% latent space to visualise data in a higher dimensional space. This is
+% done through the use of the mean responsibility and magnification
+% factors.
+%
+% See also
+% DEMGTM1, GTM, GTMEM, GTMPOST
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Fix seeds for reproducible results
+rand('state', 420);
+randn('state', 420);
+
+ndata = 300 % NOTE(review): missing ';' -- echoes ndata to the console (probably unintended)
+clc;
+disp('This demonstration shows how a Generative Topographic Mapping')
+disp('can be used to model and visualise high dimensional data. The')
+disp('data is generated from a mixture of two spherical Gaussians in')
+dstring = ['four dimensional space. ', num2str(ndata), ...
+ ' data points are generated.'];
+disp(dstring);
+disp(' ');
+disp('Press any key to continue.')
+pause
+% Create data
+data_dim = 4;
+latent_dim = 2;
+mix = gmm(data_dim, 2, 'spherical');
+mix.centres = [1 1 1 1; 0 0 0 0];
+mix.priors = [0.5 0.5];
+mix.covars = [0.1 0.1];
+
+[data, labels] = gmmsamp(mix, ndata);
+
+latent_shape = [15 15]; % Number of latent points in each dimension
+nlatent = prod(latent_shape); % Number of latent points
+num_rbf_centres = 16;
+
+clc;
+dstring = ['Next we generate and initialise the GTM. There are ',...
+ num2str(nlatent), ' latent points'];
+disp(dstring);
+dstring = ['arranged in a square of ', num2str(latent_shape(1)), ...
+ ' points on a side. There are ', num2str(num_rbf_centres), ...
+ ' centres in the'];
+disp(dstring);
+disp('RBF model, which has Gaussian activation functions.')
+disp(' ')
+disp('Once the model is created, the latent data sample')
+disp('and RBF centres are placed uniformly in the square [-1 1 -1 1].')
+disp('The output weights of the RBF are computed to map the latent');
+disp('space to the two dimensional PCA subspace of the data.');
+disp(' ')
+disp('Press any key to continue.');
+pause;
+
+% Create and initialise GTM model
+net = gtm(latent_dim, nlatent, data_dim, num_rbf_centres, ...
+ 'gaussian', 0.1);
+
+options = foptions;
+options(1) = -1;
+options(7) = 1; % Set width factor of RBF
+net = gtminit(net, options, data, 'regular', latent_shape, [4 4]); % RBF centres on a 4x4 grid (16 = num_rbf_centres)
+
+options = foptions;
+options(14) = 30;
+options(1) = 1;
+
+clc;
+dstring = ['We now train the model with ', num2str(options(14)), ...
+ ' iterations of'];
+disp(dstring)
+disp('the EM algorithm for the GTM.')
+disp(' ')
+disp('Press any key to continue.')
+pause;
+
+[net, options] = gtmem(net, data, options);
+
+disp(' ')
+disp('Press any key to continue.')
+pause;
+
+clc;
+disp('We now visualise the data by plotting, for each data point,');
+disp('the posterior mean and mode (in latent space). These give');
+disp('a summary of the entire posterior distribution in latent space.')
+disp('The corresponding values are joined by a line to aid the')
+disp('interpretation.')
+disp(' ')
+disp('Press any key to continue.');
+pause;
+% Plot posterior means
+means = gtmlmean(net, data);
+modes = gtmlmode(net, data);
+PointSize = 12;
+ClassSymbol1 = 'r.';
+ClassSymbol2 = 'b.';
+fh1 = figure;
+hold on;
+title('Visualisation in latent space')
+plot(means((labels==1),1), means(labels==1,2), ...
+ ClassSymbol1, 'MarkerSize', PointSize)
+plot(means((labels>1),1),means(labels>1,2),...
+ ClassSymbol2, 'MarkerSize', PointSize)
+
+ClassSymbol1 = 'ro';
+ClassSymbol2 = 'bo';
+plot(modes(labels==1,1), modes(labels==1,2), ...
+ ClassSymbol1)
+plot(modes(labels>1,1),modes(labels>1,2),...
+ ClassSymbol2)
+
+% Join up means and modes
+for n = 1:ndata
+ plot([means(n,1); modes(n,1)], [means(n,2); modes(n,2)], 'g-')
+end
+% Place legend outside data plot
+legend('Mean (class 1)', 'Mean (class 2)', 'Mode (class 1)',...
+ 'Mode (class 2)', -1); % -1 places the legend outside the axes (legacy numeric position code)
+
+% Display posterior for a data point
+% Choose an interesting one with a large distance between mean and
+% mode
+[distance, point] = max(sum((means-modes).^2, 2)); % squared latent-space distance per point
+resp = gtmpost(net, data(point, :));
+
+disp(' ')
+disp('For more detailed information, the full posterior distribution')
+disp('(or responsibility) can be plotted in latent space for a')
+disp('single data point. This point has been chosen as the one')
+disp('with the largest distance between mean and mode.')
+disp(' ')
+disp('Press any key to continue.');
+pause;
+
+R = reshape(resp, fliplr(latent_shape)); % responsibilities as a grid; fliplr to match latent point ordering -- TODO confirm
+XL = reshape(net.X(:,1), fliplr(latent_shape));
+YL = reshape(net.X(:,2), fliplr(latent_shape));
+
+fh2 = figure;
+imagesc(net.X(:, 1), net.X(:,2), R);
+hold on;
+tstr = ['Responsibility for point ', num2str(point)];
+title(tstr);
+set(gca,'YDir','normal')
+colormap(hot);
+colorbar
+disp(' ');
+disp('Press any key to continue.')
+pause
+
+clc
+disp('Finally, we visualise the data with the posterior means in')
+disp('latent space as before, but superimpose the magnification')
+disp('factors to highlight the separation between clusters.')
+disp(' ')
+disp('Note the large magnitude factors down the centre of the')
+disp('graph, showing that the manifold is stretched more in')
+disp('this region than within each of the two clusters.')
+ClassSymbol1 = 'g.';
+ClassSymbol2 = 'b.';
+
+fh3 = figure;
+mags = gtmmag(net, net.X);
+% Reshape into grid form
+Mags = reshape(mags, fliplr(latent_shape));
+imagesc(net.X(:, 1), net.X(:,2), Mags);
+hold on
+title('Dataset visualisation with magnification factors')
+set(gca,'YDir','normal')
+colormap(hot);
+colorbar
+hold on; % Else the magnification plot disappears
+plot(means(labels==1,1), means(labels==1,2), ...
+ ClassSymbol1, 'MarkerSize', PointSize)
+plot(means(labels>1,1), means(labels>1,2), ...
+ ClassSymbol2, 'MarkerSize', PointSize)
+
+disp(' ')
+disp('Press any key to exit.')
+pause
+
+close(fh1);
+close(fh2);
+close(fh3);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demhint.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demhint.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function demhint(nin, nhidden, nout)
+%DEMHINT Demonstration of Hinton diagram for 2-layer feed-forward network.
+%
+% Description
+%
+% DEMHINT plots a Hinton diagram for a 2-layer feedforward network with
+% 5 inputs, 7 hidden units and 3 outputs. The weight vector is chosen
+% from a Gaussian distribution as described under MLP.
+%
+% DEMHINT(NIN, NHIDDEN, NOUT) allows the user to specify the number of
+% inputs, hidden units and outputs.
+%
+% See also
+% HINTON, HINTMAT, MLP, MLPPAK, MLPUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin < 1 nin = 5; end
+if nargin < 2 nhidden = 7; end
+if nargin < 3 nout = 3; end
+
+% Fix the seed for reproducible results
+randn('state', 42);
+clc
+disp('This demonstration illustrates the plotting of Hinton diagrams')
+disp('for Multi-Layer Perceptron networks.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+net = mlp(nin, nhidden, nout, 'linear');
+
+[h1, h2] = mlphint(net); % one figure handle per weight layer
+clc
+disp('The MLP has been created with')
+disp([' ' int2str(nin) ' inputs'])
+disp([' ' int2str(nhidden) ' hidden units'])
+disp([' ' int2str(nout) ' outputs'])
+disp(' ')
+disp('One figure is produced for each layer of weights.')
+disp('For each layer the fan-in weights are arranged in rows for each unit.')
+disp('The bias weight is separated from the rest by a red vertical line.')
+disp('The area of each box is proportional to the weight value: positive')
+disp('values are white, and negative are black.')
+disp(' ')
+disp('Press any key to exit.');
+pause;
+delete(h1); % remove the two Hinton diagram figures
+delete(h2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demhmc1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demhmc1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,145 @@
+%DEMHMC1 Demonstrate Hybrid Monte Carlo sampling on mixture of two Gaussians.
+%
+% Description
+% The problem consists of generating data from a mixture of two
+% Gaussians in two dimensions using a hybrid Monte Carlo algorithm with
+% persistence. A mixture model is then fitted to the sample to compare
+% it with the true underlying generator.
+%
+% See also
+% DEMHMC3, HMC, DEMPOT, DEMGPOT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+dim = 2; % Data dimension
+ncentres = 2; % Number of centres in mixture model
+
+seed = 42; % Seed for random weight initialization.
+randn('state', seed);
+rand('state', seed);
+
+clc
+disp('This demonstration illustrates the use of the hybrid Monte Carlo')
+disp('algorithm to sample from a mixture of two Gaussians.')
+disp('The means of the two components are [0 0] and [2 2].')
+disp(' ')
+disp('First we set up the parameters of the mixture model we are sampling')
+disp('from.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Set up mixture model to sample from
+mix = gmm(dim, ncentres, 'spherical');
+mix.centres(1, :) = [0 0];
+mix.centres(2, :) = [2 2];
+x = [0 1]; % Start vector
+
+% Set up vector of options for hybrid Monte Carlo.
+
+nsamples = 160; % Number of retained samples.
+
+options = foptions; % Default options vector.
+options(1) = 1; % Switch on diagnostics.
+options(5) = 1; % Use persistence
+options(7) = 50; % Number of steps in trajectory.
+options(14) = nsamples; % Number of Monte Carlo samples returned.
+options(15) = 30; % Number of samples omitted at start of chain.
+options(18) = 0.02; % Step size.
+
+clc
+disp(['Next we take ', num2str(nsamples),' samples from the distribution.'...
+ , 'The first ', num2str(options(15))])
+disp('samples at the start of the chain are omitted. As persistence')
+disp('is used, the momentum has a small random component added at each step.')
+disp([num2str(options(7)), ...
+ ' iterations are used at each step and the step size is ',...
+ num2str(options(18))])
+disp('Sampling starts at the point [0 1].')
+disp('The new state is accepted if the threshold value is greater than')
+disp('a random number between 0 and 1.')
+disp(' ')
+disp('Negative step numbers indicate samples discarded from the start of the')
+disp('chain.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+[samples, energies] = hmc('dempot', x, options, 'demgpot', mix); % 'dempot' = energy function, 'demgpot' = its gradient
+
+disp(' ')
+disp('Press any key to continue.')
+pause
+clc
+disp('The plot shows the samples generated by the HMC function.')
+disp('The different colours are used to show how the samples move from')
+disp('one component to the other over time.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+probs = exp(-energies); % NOTE(review): computed but never used below
+fh1 = figure;
+% Plot data in 4 groups
+ngroups = 4;
+g1end = floor(nsamples/ngroups);
+g2end = floor(2*nsamples/ngroups);
+g3end = floor(3*nsamples/ngroups);
+p1 = plot(samples(1:g1end,1), samples(1:g1end,2), 'k.', 'MarkerSize', 12);
+hold on
+lstrings = char(['Samples 1-' int2str(g1end)], ...
+ ['Samples ' int2str(g1end+1) '-' int2str(g2end)], ...
+ ['Samples ' int2str(g2end+1) '-' int2str(g3end)], ...
+ ['Samples ' int2str(g3end+1) '-' int2str(nsamples)]);
+p2 = plot(samples(g1end+1:g2end,1), samples(g1end+1:g2end,2), ...
+ 'r.', 'MarkerSize', 12);
+p3 = plot(samples(g2end+1:g3end,1), samples(g2end+1:g3end,2), ...
+ 'g.', 'MarkerSize', 12);
+p4 = plot(samples(g3end+1:nsamples,1), samples(g3end+1:nsamples,2), ...
+ 'b.', 'MarkerSize', 12);
+legend([p1 p2 p3 p4], lstrings, 2); % 2 = upper-left corner (legacy numeric position code)
+
+clc
+disp('We now fit a Gaussian mixture model to the sampled data.')
+disp('The model has spherical covariance structure and the correct')
+disp('number of components.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Fit a mixture model to the sample
+newmix = gmm(dim, ncentres, 'spherical');
+options = foptions;
+options(1) = -1; % Switch off all diagnostics
+options(14) = 5; % Just use 5 iterations of k-means in initialisation
+% Initialise the model parameters from the samples
+newmix = gmminit(newmix, samples, options);
+
+% Set up vector of options for EM trainer
+options = zeros(1, 18);
+options(1) = 1; % Prints out error values.
+options(14) = 15; % Max. Number of iterations.
+
+disp('We now train the model using the EM algorithm for 15 iterations')
+disp(' ')
+disp('Press any key to continue')
+pause
+[newmix, options, errlog] = gmmem(newmix, samples, options);
+
+% Print out model
+disp(' ')
+disp('The trained model has parameters ')
+disp(' Priors Centres Variances')
+disp([newmix.priors' newmix.centres newmix.covars'])
+disp('Note the close correspondence between these parameters and those')
+disp('of the distribution used to generate the data')
+disp(' ')
+disp(' Priors Centres Variances')
+disp([mix.priors' mix.centres mix.covars'])
+disp(' ')
+disp('Press any key to exit')
+pause
+
+close(fh1);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demhmc2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demhmc2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,132 @@
+%DEMHMC2 Demonstrate Bayesian regression with Hybrid Monte Carlo sampling.
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T with data generated by sampling X at equal intervals and then
+% generating target data by computing SIN(2*PI*X) and adding Gaussian
+% noise. The model is a 2-layer network with linear outputs, and the
+% hybrid Monte Carlo algorithm (without persistence) is used to sample
+% from the posterior distribution of the weights. The graph shows the
+% underlying function, 100 samples from the function given by the
+% posterior distribution of the weights, and the average prediction
+% (weighted by the posterior probabilities).
+%
+% See also
+% DEMHMC3, HMC, MLP, MLPERR, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the matrix of inputs x and targets t.
+ndata = 20; % Number of data points.
+noise = 0.1; % Standard deviation of noise distribution.
+nin = 1; % Number of inputs.
+nout = 1; % Number of outputs.
+
+seed = 42; % Seed for random weight initialization.
+randn('state', seed);
+rand('state', seed);
+
+x = 0.25 + 0.1*randn(ndata, nin); % inputs clustered around 0.25
+t = sin(2*pi*x) + noise*randn(size(x));
+
+clc
+disp('This demonstration illustrates the use of the hybrid Monte Carlo')
+disp('algorithm to sample from the posterior weight distribution of a')
+disp('multi-layer perceptron.')
+disp(' ')
+disp('A regression problem is used, with the one-dimensional data drawn')
+disp('from a noisy sine function. The x values are sampled from a normal')
+disp('distribution with mean 0.25 and variance 0.01.')
+disp(' ')
+disp('First we initialise the network.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Set up network parameters.
+nhidden = 5; % Number of hidden units.
+alpha = 0.001; % Coefficient of weight-decay prior.
+beta = 100.0; % Coefficient of data error.
+
+% Create and initialize network model.
+% Initialise weights reasonably close to 0
+net = mlp(nin, nhidden, nout, 'linear', alpha, beta);
+net = mlpinit(net, 10);
+
+clc
+disp('Next we take 100 samples from the posterior distribution. The first')
+disp('200 samples at the start of the chain are omitted. As persistence')
+disp('is not used, the momentum is randomised at each step. 100 iterations')
+disp('are used at each step. The new state is accepted if the threshold')
+disp('value is greater than a random number between 0 and 1.')
+disp(' ')
+disp('Negative step numbers indicate samples discarded from the start of the')
+disp('chain.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Set up vector of options for hybrid Monte Carlo.
+nsamples = 100; % Number of retained samples.
+
+options = foptions; % Default options vector.
+options(1) = 1; % Switch on diagnostics.
+options(7) = 100; % Number of steps in trajectory.
+options(14) = nsamples; % Number of Monte Carlo samples returned.
+options(15) = 200; % Number of samples omitted at start of chain.
+options(18) = 0.002; % Step size.
+
+w = mlppak(net); % pack all network weights into a single vector for the sampler
+% Initialise HMC
+hmc('state', 42); % seed the sampler's internal random state
+[samples, energies] = hmc('neterr', w, options, 'netgrad', net, x, t);
+
+clc
+disp('The plot shows the underlying noise free function, the 100 samples')
+disp('produced from the MLP, and their average as a Monte Carlo estimate')
+disp('of the true posterior average.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+nplot = 300;
+plotvals = [0 : 1/(nplot - 1) : 1]'; % nplot evenly spaced points on [0,1]
+pred = zeros(size(plotvals));
+fh = figure;
+for k = 1:nsamples
+ w2 = samples(k,:);
+ net2 = mlpunpak(net, w2);
+ y = mlpfwd(net2, plotvals);
+ % Average sample predictions as Monte Carlo estimate of true integral
+ pred = pred + y;
+ h4 = plot(plotvals, y, '-r', 'LineWidth', 1); % h4 ends up holding the last sample's line handle (used in legend)
+ if k == 1
+ hold on
+ end
+end
+pred = pred./nsamples;
+
+% Plot data
+h1 = plot(x, t, 'ob', 'LineWidth', 2, 'MarkerFaceColor', 'blue');
+axis([0 1 -3 3])
+
+% Plot function
+[fx, fy] = fplot('sin(2*pi*x)', [0 1], '--g');
+h2 = plot(fx, fy, '--g', 'LineWidth', 2);
+set(gca, 'box', 'on');
+
+% Plot averaged prediction
+h3 = plot(plotvals, pred, '-c', 'LineWidth', 2);
+hold off
+
+lstrings = char('Data', 'Function', 'Prediction', 'Samples');
+legend([h1 h2 h3 h4], lstrings, 3); % 3 = lower-left corner (legacy numeric position code)
+
+disp('Note how the predictions become much further from the true function')
+disp('away from the region of high data density.')
+disp(' ')
+disp('Press any key to exit.')
+pause
+close(fh);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demhmc3.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demhmc3.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+%DEMHMC3 Demonstrate Bayesian regression with Hybrid Monte Carlo sampling.
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T with data generated by sampling X at equal intervals and then
+% generating target data by computing SIN(2*PI*X) and adding Gaussian
+% noise. The model is a 2-layer network with linear outputs, and the
+% hybrid Monte Carlo algorithm (with persistence) is used to sample
+% from the posterior distribution of the weights. The graph shows the
+% underlying function, 100 samples from the function given by the
+% posterior distribution of the weights, and the average prediction
+% (weighted by the posterior probabilities).
+%
+% See also
+% DEMHMC2, HMC, MLP, MLPERR, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the matrix of inputs x and targets t.
+ndata = 20; % Number of data points.
+noise = 0.1; % Standard deviation of noise distribution.
+nin = 1; % Number of inputs.
+nout = 1; % Number of outputs.
+
+seed = 42; % Seed for random number generators.
+randn('state', seed);
+rand('state', seed);
+
+x = 0.25 + 0.1*randn(ndata, nin);
+t = sin(2*pi*x) + noise*randn(size(x));
+
+clc
+disp('This demonstration illustrates the use of the hybrid Monte Carlo')
+disp('algorithm to sample from the posterior weight distribution of a')
+disp('multi-layer perceptron.')
+disp(' ')
+disp('A regression problem is used, with the one-dimensional data drawn')
+disp('from a noisy sine function. The x values are sampled from a normal')
+disp('distribution with mean 0.25 and variance 0.01.')
+disp(' ')
+disp('First we initialise the network.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Set up network parameters.
+nhidden = 5; % Number of hidden units.
+alpha = 0.001; % Coefficient of weight-decay prior.
+beta = 100.0; % Coefficient of data error.
+
+% Create and initialize network model.
+
+% Initialise weights reasonably close to 0
+net = mlp(nin, nhidden, nout, 'linear', alpha, beta);
+net = mlpinit(net, 10);
+
+clc
+disp('Next we take 100 samples from the posterior distribution. The first')
+disp('300 samples at the start of the chain are omitted. As persistence')
+disp('is used, the momentum has a small random component added at each step.')
+disp('10 iterations are used at each step (compared with 100 in demhmc2).')
+disp('The step size is 0.005 (compared with 0.002).')
+disp('The new state is accepted if the threshold')
+disp('value is greater than a random number between 0 and 1.')
+disp(' ')
+disp('Negative step numbers indicate samples discarded from the start of the')
+disp('chain.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Set up vector of options for hybrid Monte Carlo.
+nsamples = 100; % Number of retained samples.
+
+options = foptions; % Default options vector.
+options(1) = 1; % Switch on diagnostics.
+options(5) = 1; % Use persistence
+options(7) = 10; % Number of steps in trajectory.
+options(14) = nsamples; % Number of Monte Carlo samples returned.
+options(15) = 300; % Number of samples omitted at start of chain.
+options(17) = 0.95; % Alpha value in persistence
+options(18) = 0.005; % Step size.
+
+w = mlppak(net);
+% Initialise HMC
+hmc('state', 42);
+[samples, energies] = hmc('neterr', w, options, 'netgrad', net, x, t);
+
+clc
+disp('The plot shows the underlying noise free function, the 100 samples')
+disp('produced from the MLP, and their average as a Monte Carlo estimate')
+disp('of the true posterior average.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+nplot = 300;
+plotvals = [0 : 1/(nplot - 1) : 1]';
+pred = zeros(size(plotvals));
+fh1 = figure;
+hold on
+for k = 1:nsamples
+ w2 = samples(k,:);
+ net2 = mlpunpak(net, w2);
+ y = mlpfwd(net2, plotvals);
+ % Sum predictions
+ pred = pred + y;
+ h4 = plot(plotvals, y, '-r', 'LineWidth', 1);
+end
+pred = pred./nsamples;
+% Plot data
+h1 = plot(x, t, 'ob', 'LineWidth', 2, 'MarkerFaceColor', 'blue');
+axis([0 1 -3 3])
+
+% Plot function
+[fx, fy] = fplot('sin(2*pi*x)', [0 1], '--g');
+h2 = plot(fx, fy, '--g', 'LineWidth', 2);
+set(gca, 'box', 'on');
+
+% Plot averaged prediction
+h3 = plot(plotvals, pred, '-c', 'LineWidth', 2);
+
+lstrings = char('Data', 'Function', 'Prediction', 'Samples');
+legend([h1 h2 h3 h4], lstrings, 3);
+hold off
+
+disp('Note how the predictions become much further from the true function')
+disp('away from the region of high data density.')
+disp(' ')
+disp('Press any key to exit.')
+pause
+close(fh1);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demkmn1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demkmn1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,111 @@
+%DEMKMN1 Demonstrate simple clustering model trained with K-means.
+%
+% Description
+% The problem consists of data in a two-dimensional space. The data is
+% drawn from three spherical Gaussian distributions with priors 0.3,
+% 0.5 and 0.2; centres (2, 3.5), (0, 0) and (0,2); and standard
+% deviations 0.2, 0.5 and 1.0. The first figure contains a scatter plot
+% of the data. The data is the same as in DEMGMM1.
+%
+% A cluster model with three components is trained using the batch K-
+% means algorithm. The matrix of centres is printed after training. The
+% second figure shows the data labelled with a colour derived from the
+% corresponding cluster.
+%
+% See also
+% DEM2DDAT, DEMGMM1, KNN1, KMEANS
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Generate the data, fixing seeds for reproducible results
+ndata = 250;
+randn('state', 42);
+rand('state', 42);
+data = dem2ddat(ndata);
+
+% Randomise data order
+data = data(randperm(ndata),:);
+
+clc
+disp('This demonstration illustrates the use of a cluster model to')
+disp('find centres that reflect the distribution of data points.')
+disp('We begin by generating the data from a mixture of three Gaussians')
+disp('in two-dimensional space and plotting it.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh1 = figure;
+plot(data(:, 1), data(:, 2), 'o')
+set(gca, 'Box', 'on')
+title('Data')
+
+% Set up cluster model
+ncentres = 3;
+centres = zeros(ncentres, 2);
+
+% Set up vector of options for kmeans trainer
+options = foptions;
+options(1) = 1; % Prints out error values.
+options(5) = 1;
+options(14) = 10; % Number of iterations.
+
+clc
+disp('The model is chosen to have three centres, which are initialised')
+disp('at randomly selected data points. We now train the model using')
+disp('the batch K-means algorithm with a maximum of 10 iterations and')
+disp('stopping tolerance of 1e-4.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Train the centres from the data
+[centres, options, post] = kmeans(centres, data, options);
+
+% Print out model
+disp(' ')
+disp('Note that training has terminated before 10 iterations as there')
+disp('has been no change in the centres or error function.')
+disp(' ')
+disp('The trained model has centres:')
+disp(centres);
+disp('Press any key to continue.')
+pause
+
+clc
+disp('We now plot each data point coloured according to its classification')
+disp('given by the nearest cluster centre. The cluster centres are denoted')
+disp('by black crosses.')
+
+% Plot the result
+fh2 = figure;
+
+hold on
+colours = ['b.'; 'r.'; 'g.'];
+
+[tempi, tempj] = find(post);
+hold on
+for i = 1:3
+ % Select data points closest to ith centre
+ thisX = data(tempi(tempj == i), 1);
+ thisY = data(tempi(tempj == i), 2);
+ hp(i) = plot(thisX, thisY, colours(i,:));
+ set(hp(i), 'MarkerSize', 12);
+end
+set(gca, 'Box', 'on')
+legend('Class 1', 'Class 2', 'Class 3', 2)
+hold on
+plot(centres(:, 1), centres(:,2), 'k+', 'LineWidth', 2, ...
+ 'MarkerSize', 8)
+title('Centres and data labels')
+hold off
+
+disp(' ')
+disp('Press any key to end.')
+pause
+
+close(fh1);
+close(fh2);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demknn1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demknn1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,103 @@
+%DEMKNN1 Demonstrate nearest neighbour classifier.
+%
+% Description
+% The problem consists of data in a two-dimensional space. The data is
+% drawn from three spherical Gaussian distributions with priors 0.3,
+% 0.5 and 0.2; centres (2, 3.5), (0, 0) and (0,2); and standard
+% deviations 0.2, 0.5 and 1.0. The first figure contains a scatter plot
+% of the data. The data is the same as in DEMGMM1.
+%
+% The second figure shows the data labelled with the corresponding
+% class given by the classifier.
+%
+% See also
+% DEM2DDAT, DEMGMM1, KNN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+clc
+disp('This program demonstrates the use of the K nearest neighbour algorithm.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Generate the test data
+ndata = 250;
+randn('state', 42);
+rand('state', 42);
+
+[data, c] = dem2ddat(ndata);
+
+% Randomise data order
+data = data(randperm(ndata),:);
+
+clc
+disp('We generate the data in two-dimensional space from a mixture of')
+disp('three spherical Gaussians. The centres are shown as black crosses')
+disp('in the plot.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+fh1 = figure;
+plot(data(:, 1), data(:, 2), 'o')
+set(gca, 'Box', 'on')
+hold on
+title('Data')
+hp1 = plot(c(:, 1), c(:,2), 'k+')
+% Increase size of crosses
+set(hp1, 'MarkerSize', 8);
+set(hp1, 'LineWidth', 2);
+hold off
+
+clc
+disp('We next use the centres as training examplars for the K nearest')
+disp('neighbour algorithm.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Use centres as training data
+train_labels = [1, 0, 0; 0, 1, 0; 0, 0, 1];
+
+% Label the test data up to kmax neighbours
+kmax = 1;
+net = knn(2, 3, kmax, c, train_labels);
+[y, l] = knnfwd(net, data);
+
+clc
+disp('We now plot each data point coloured according to its classification.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Plot the result
+fh2 = figure;
+colors = ['b.'; 'r.'; 'g.'];
+for i = 1:3
+ thisX = data(l == i,1);
+ thisY = data(l == i,2);
+ hp(i) = plot(thisX, thisY, colors(i,:));
+ set(hp(i), 'MarkerSize', 12);
+ if i == 1
+ hold on
+ end
+end
+set(gca, 'Box', 'on');
+legend('Class 1', 'Class 2', 'Class 3', 2)
+hold on
+labels = ['1', '2', '3'];
+hp2 = plot(c(:, 1), c(:,2), 'k+');
+% Increase size of crosses
+set(hp2, 'MarkerSize', 8);
+set(hp2, 'LineWidth', 2);
+
+test_labels = labels(l(:,1));
+
+title('Training data and data labels')
+hold off
+
+disp('The demonstration is now complete: press any key to exit.')
+pause
+close(fh1);
+close(fh2);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demmdn1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demmdn1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,211 @@
+%DEMMDN1 Demonstrate fitting a multi-valued function using a Mixture Density Network.
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T with data generated by sampling T at equal intervals and then
+% generating input data by computing X = T + 0.3*SIN(2*PI*T) and adding
+% Gaussian noise. A Mixture Density Network with 3 centres in the
+% mixture model is trained by minimizing a negative log likelihood
+% error function using the scaled conjugate gradient optimizer.
+%
+% The conditional means, mixing coefficients and variances are plotted
+% as a function of X, and a contour plot of the full conditional
+% density is also generated.
+%
+% See also
+% MDN, MDNERR, MDNGRAD, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the matrix of inputs x and targets t.
+seedn = 42;
+seed = 42;
+randn('state', seedn);
+rand('state', seed);
+ndata = 300; % Number of data points.
+noise = 0.2; % Range of noise distribution.
+t = [0:1/(ndata - 1):1]';
+x = t + 0.3*sin(2*pi*t) + noise*rand(ndata, 1) - noise/2;
+axis_limits = [-0.2 1.2 -0.2 1.2];
+
+clc
+disp('This demonstration illustrates the use of a Mixture Density Network')
+disp('to model multi-valued functions. The data is generated from the')
+disp('mapping x = t + 0.3 sin(2 pi t) + e, where e is a noise term.')
+disp('We begin by plotting the data.')
+disp(' ')
+disp('Press any key to continue')
+pause
+% Plot the data
+fh1 = figure;
+p1 = plot(x, t, 'ob');
+axis(axis_limits);
+hold on
+disp('Note that for x in the range 0.35 to 0.65, there are three possible')
+disp('branches of the function.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+% Set up network parameters.
+nin = 1; % Number of inputs.
+nhidden = 5; % Number of hidden units.
+ncentres = 3; % Number of mixture components.
+dim_target = 1; % Dimension of target space
+mdntype = '0'; % Currently unused: reserved for future use
+alpha = 100; % Inverse variance for weight initialisation
+ % Make variance small for good starting point
+
+% Create and initialize network weight vector.
+net = mdn(nin, nhidden, ncentres, dim_target, mdntype);
+init_options = zeros(1, 18);
+init_options(1) = -1; % Suppress all messages
+init_options(14) = 10; % 10 iterations of K means in gmminit
+net = mdninit(net, alpha, t, init_options);
+
+% Set up vector of options for the optimiser.
+options = foptions;
+options(1) = 1; % This provides display of error values.
+options(14) = 200; % Number of training cycles.
+
+clc
+disp('We initialise the neural network model, which is an MLP with a')
+disp('Gaussian mixture model with three components and spherical variance')
+disp('as the error function. This enables us to model the complete')
+disp('conditional density function.')
+disp(' ')
+disp('Next we train the model for 200 epochs using a scaled conjugate gradient')
+disp('optimizer. The error function is the negative log likelihood of the')
+disp('training data.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Train using scaled conjugate gradients.
+[net, options] = netopt(net, options, x, t, 'scg');
+
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+clc
+disp('We can also train a conventional MLP with sum of squares error function.')
+disp('This will approximate the conditional mean, which is not always a')
+disp('good representation of the data. Note that the error function is the')
+disp('sum of squares error on the training data, which accounts for the')
+disp('different values from training the MDN.')
+disp(' ')
+disp('We train the network with the quasi-Newton optimizer for 80 epochs.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+mlp_nhidden = 8;
+net2 = mlp(nin, mlp_nhidden, dim_target, 'linear');
+options(14) = 80;
+[net2, options] = netopt(net2, options, x, t, 'quasinew');
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+clc
+disp('Now we plot the underlying function, the MDN prediction,')
+disp('represented by the mode of the conditional distribution, and the')
+disp('prediction of the conventional MLP.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Plot the original function, and the trained network function.
+plotvals = [0:0.01:1]';
+mixes = mdn2gmm(mdnfwd(net, plotvals));
+axis(axis_limits);
+yplot = t+0.3*sin(2*pi*t);
+p2 = plot(yplot, t, '--y');
+
+% Use the mode to represent the function
+y = zeros(1, length(plotvals));
+priors = zeros(length(plotvals), ncentres);
+c = zeros(length(plotvals), 3);
+widths = zeros(length(plotvals), ncentres);
+for i = 1:length(plotvals)
+ [m, j] = max(mixes(i).priors);
+ y(i) = mixes(i).centres(j,:);
+ c(i,:) = mixes(i).centres';
+end
+p3 = plot(plotvals, y, '*r');
+p4 = plot(plotvals, mlpfwd(net2, plotvals), 'g');
+set(p4, 'LineWidth', 2);
+legend([p1 p2 p3 p4], 'data', 'function', 'MDN mode', 'MLP mean', 4);
+hold off
+
+clc
+disp('We can also plot how the mixture model parameters depend on x.')
+disp('First we plot the mixture centres, then the priors and finally')
+disp('the variances.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+fh2 = figure;
+subplot(3, 1, 1)
+plot(plotvals, c)
+hold on
+title('Mixture centres')
+legend('centre 1', 'centre 2', 'centre 3')
+hold off
+
+priors = reshape([mixes.priors], mixes(1).ncentres, size(mixes, 2))';
+%%fh3 = figure;
+subplot(3, 1, 2)
+plot(plotvals, priors)
+hold on
+title('Mixture priors')
+legend('centre 1', 'centre 2', 'centre 3')
+hold off
+
+variances = reshape([mixes.covars], mixes(1).ncentres, size(mixes, 2))';
+%%fh4 = figure;
+subplot(3, 1, 3)
+plot(plotvals, variances)
+hold on
+title('Mixture variances')
+legend('centre 1', 'centre 2', 'centre 3')
+hold off
+
+disp('The last figure is a contour plot of the conditional probability')
+disp('density generated by the Mixture Density Network. Note how it')
+disp('is well matched to the regions of high data density.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Contour plot for MDN.
+i = 0:0.01:1.0;
+j = 0:0.01:1.0;
+
+[I, J] = meshgrid(i,j);
+I = I(:);
+J = J(:);
+li = length(i);
+lj = length(j);
+Z = zeros(li, lj);
+for k = 1:li;
+ Z(:,k) = gmmprob(mixes(k), j');
+end
+fh5 = figure;
+% Set up levels by hand to make a good figure
+v = [2 2.5 3 3.5 5:3:18];
+contour(i, j, Z, v)
+hold on
+title('Contour plot of conditional density')
+hold off
+
+disp(' ')
+disp('Press any key to exit.')
+pause
+close(fh1);
+close(fh2);
+%%close(fh3);
+%%close(fh4);
+close(fh5);
+%%clear all;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demmet1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demmet1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,103 @@
+function samples=demmet1(plot_wait)
+%DEMMET1 Demonstrate Markov Chain Monte Carlo sampling on a Gaussian.
+%
+% Description
+% The problem consists of generating data from a Gaussian in two
+% dimensions using a Markov Chain Monte Carlo algorithm. The points are
+% plotted one after another to show the path taken by the chain.
+%
+% DEMMET1(PLOTWAIT) allows the user to set the time (in a whole number
+% of seconds) between the plotting of points. This is passed to PAUSE
+%
+% See also
+% DEMHMC1, METROP, GMM, DEMPOT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin == 0 | plot_wait < 0
+ plot_wait = 0; % No wait if not specified or incorrect
+end
+dim = 2; % Data dimension
+ncentres = 1; % Number of centres in mixture model
+
+seed = 42; % Seed for random number generators.
+randn('state', seed);
+rand('state', seed);
+
+clc
+disp('This demonstration illustrates the use of the Markov chain Monte Carlo')
+disp('algorithm to sample from a Gaussian distribution.')
+disp('The mean is at [0 0].')
+disp(' ')
+disp('First we set up the parameters of the mixture model we are sampling')
+disp('from.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Set up mixture model to sample from
+mix = gmm(dim, ncentres, 'spherical');
+mix.centres(1, :) = [0 0];
+x = [0 4]; % Start vector
+
+% Set up vector of options for hybrid Monte Carlo.
+
+nsamples = 150; % Number of retained samples.
+
+options = foptions; % Default options vector.
+options(1) = 0; % Switch off diagnostics.
+options(14) = nsamples; % Number of Monte Carlo samples returned.
+options(18) = 0.1;
+
+clc
+disp('Next we take 150 samples from the distribution.')
+disp('Sampling starts at the point [0 4].')
+disp('The new state is accepted if the threshold value is greater than')
+disp('a random number between 0 and 1.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+[samples, energies] = metrop('dempot', x, options, '', mix);
+
+clc
+disp('The plot shows the samples generated by the MCMC function in order')
+disp('as an animation to show the path taken by the Markov chain.')
+disp('The different colours are used to show that the first few samples')
+disp('should be discarded as they lie too far from the mean.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+probs = exp(-energies);
+fh1 = figure;
+g1end = floor(nsamples/4);
+
+for n = 1:nsamples
+
+ if n < g1end
+ Marker = 'k.';
+ p1 = plot(samples(n,1), samples(n,2), Marker, ...
+ 'EraseMode', 'none', 'MarkerSize', 12);
+ if n == 1
+ axis([-3 5 -2 5])
+ end
+ else
+ Marker = 'r.';
+ p2 = plot(samples(n,1), samples(n,2), Marker, ...
+ 'EraseMode', 'none', 'MarkerSize', 12);
+ end
+ hold on
+ drawnow; % Force drawing immediately
+ pause(plot_wait);
+end
+lstrings = char(['Samples 1-' int2str(g1end)], ...
+ ['Samples ' int2str(g1end+1) '-' int2str(nsamples)]);
+legend([p1 p2], lstrings, 1);
+
+disp(' ')
+disp('Press any key to exit.')
+pause
+close(fh1);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demmlp1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demmlp1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,88 @@
+%DEMMLP1 Demonstrate simple regression using a multi-layer perceptron
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T with data generated by sampling X at equal intervals and then
+% generating target data by computing SIN(2*PI*X) and adding Gaussian
+% noise. A 2-layer network with linear outputs is trained by minimizing
+% a sum-of-squares error function using the scaled conjugate gradient
+% optimizer.
+%
+% See also
+% MLP, MLPERR, MLPGRAD, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the matrix of inputs x and targets t.
+
+ndata = 20; % Number of data points.
+noise = 0.2; % Standard deviation of noise distribution.
+x = [0:1/(ndata - 1):1]';
+randn('state', 1);
+t = sin(2*pi*x) + noise*randn(ndata, 1);
+
+clc
+disp('This demonstration illustrates the use of a Multi-Layer Perceptron')
+disp('network for regression problems. The data is generated from a noisy')
+disp('sine function.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Set up network parameters.
+nin = 1; % Number of inputs.
+nhidden = 3; % Number of hidden units.
+nout = 1; % Number of outputs.
+alpha = 0.01; % Coefficient of weight-decay prior.
+
+% Create and initialize network weight vector.
+
+net = mlp(nin, nhidden, nout, 'linear', alpha);
+
+% Set up vector of options for the optimiser.
+
+options = zeros(1,18);
+options(1) = 1; % This provides display of error values.
+options(14) = 100; % Number of training cycles.
+
+clc
+disp(['The network has ', num2str(nhidden), ' hidden units and a weight decay'])
+disp(['coefficient of ', num2str(alpha), '.'])
+disp(' ')
+disp('After initializing the network, we train it use the scaled conjugate')
+disp('gradients algorithm for 100 cycles.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+% Train using scaled conjugate gradients.
+[net, options] = netopt(net, options, x, t, 'scg');
+
+disp(' ')
+disp('Now we plot the data, underlying function, and network outputs')
+disp('on a single graph to compare the results.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Plot the data, the original function, and the trained network function.
+plotvals = [0:0.01:1]';
+y = mlpfwd(net, plotvals);
+fh1 = figure;
+plot(x, t, 'ob')
+hold on
+xlabel('Input')
+ylabel('Target')
+axis([0 1 -1.5 1.5])
+[fx, fy] = fplot('sin(2*pi*x)', [0 1]);
+plot(fx, fy, '-r', 'LineWidth', 2)
+plot(plotvals, y, '-k', 'LineWidth', 2)
+legend('data', 'function', 'network');
+
+disp(' ')
+disp('Press any key to end.')
+pause
+close(fh1);
+clear all;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demmlp2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demmlp2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,337 @@
+%DEMMLP2 Demonstrate simple classification using a multi-layer perceptron
+%
+% Description
+% The problem consists of input data in two dimensions drawn from a
+% mixture of three Gaussians: two of which are assigned to a single
+% class. An MLP with logistic outputs trained with a quasi-Newton
+% optimisation algorithm is compared with the optimal Bayesian decision
+% rule.
+%
+% See also
+% MLP, MLPFWD, NETERR, QUASINEW
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Set up some figure parameters
+AxisShift = 0.05;
+ClassSymbol1 = 'r.';
+ClassSymbol2 = 'y.';
+PointSize = 12;
+titleSize = 10;
+
+% Fix the seeds
+rand('state', 423);
+randn('state', 423);
+
+clc
+disp('This demonstration shows how an MLP with logistic outputs and')
+disp('and cross entropy error function can be trained to model the')
+disp('posterior class probabilities in a classification problem.')
+disp('The results are compared with the optimal Bayes rule classifier,')
+disp('which can be computed exactly as we know the form of the generating')
+disp('distribution.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh1 = figure;
+set(fh1, 'Name', 'True Data Distribution');
+whitebg(fh1, 'k');
+
+%
+% Generate the data
+%
+n=200;
+
+% Set up mixture model: 2d data with three centres
+% Class 1 is first centre, class 2 from the other two
+mix = gmm(2, 3, 'full');
+mix.priors = [0.5 0.25 0.25];
+mix.centres = [0 -0.1; 1 1; 1 -1];
+mix.covars(:,:,1) = [0.625 -0.2165; -0.2165 0.875];
+mix.covars(:,:,2) = [0.2241 -0.1368; -0.1368 0.9759];
+mix.covars(:,:,3) = [0.2375 0.1516; 0.1516 0.4125];
+
+[data, label] = gmmsamp(mix, n);
+
+%
+% Calculate some useful axis limits
+%
+x0 = min(data(:,1));
+x1 = max(data(:,1));
+y0 = min(data(:,2));
+y1 = max(data(:,2));
+dx = x1-x0;
+dy = y1-y0;
+expand = 5/100; % Add on 5 percent each way
+x0 = x0 - dx*expand;
+x1 = x1 + dx*expand;
+y0 = y0 - dy*expand;
+y1 = y1 + dy*expand;
+resolution = 100;
+step = dx/resolution;
+xrange = [x0:step:x1];
+yrange = [y0:step:y1];
+%
+% Generate the grid
+%
+[X Y]=meshgrid([x0:step:x1],[y0:step:y1]);
+%
+% Calculate the class conditional densities, the unconditional densities and
+% the posterior probabilities
+%
+px_j = gmmactiv(mix, [X(:) Y(:)]);
+px = reshape(px_j*(mix.priors)',size(X));
+post = gmmpost(mix, [X(:) Y(:)]);
+p1_x = reshape(post(:, 1), size(X));
+p2_x = reshape(post(:, 2) + post(:, 3), size(X));
+
+%
+% Generate some pretty pictures !!
+%
+colormap(hot)
+colorbar
+subplot(1,2,1)
+hold on
+plot(data((label==1),1),data(label==1,2),ClassSymbol1, 'MarkerSize', PointSize)
+plot(data((label>1),1),data(label>1,2),ClassSymbol2, 'MarkerSize', PointSize)
+contour(xrange,yrange,p1_x,[0.5 0.5],'w-');
+axis([x0 x1 y0 y1])
+set(gca,'Box','On')
+title('The Sampled Data');
+rect=get(gca,'Position');
+rect(1)=rect(1)-AxisShift;
+rect(3)=rect(3)+AxisShift;
+set(gca,'Position',rect)
+hold off
+
+subplot(1,2,2)
+imagesc(X(:),Y(:),px);
+hold on
+[cB, hB] = contour(xrange,yrange,p1_x,[0.5 0.5],'w:');
+set(hB,'LineWidth', 2);
+axis([x0 x1 y0 y1])
+set(gca,'YDir','normal')
+title('Probability Density p(x)')
+hold off
+
+drawnow;
+clc;
+disp('The first figure shows the data sampled from a mixture of three')
+disp('Gaussians, the first of which (whose centre is near the origin) is')
+disp('labelled red and the other two are labelled yellow. The second plot')
+disp('shows the unconditional density of the data with the optimal Bayesian')
+disp('decision boundary superimposed.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+fh2 = figure;
+set(fh2, 'Name', 'Class-conditional Densities and Posterior Probabilities');
+whitebg(fh2, 'w');
+
+subplot(2,2,1)
+p1=reshape(px_j(:,1),size(X));
+imagesc(X(:),Y(:),p1);
+colormap hot
+colorbar
+axis(axis)
+set(gca,'YDir','normal')
+hold on
+plot(mix.centres(:,1),mix.centres(:,2),'b+','MarkerSize',8,'LineWidth',2)
+title('Density p(x|red)')
+hold off
+
+subplot(2,2,2)
+p2=reshape((px_j(:,2)+px_j(:,3)),size(X));
+imagesc(X(:),Y(:),p2);
+colorbar
+set(gca,'YDir','normal')
+hold on
+plot(mix.centres(:,1),mix.centres(:,2),'b+','MarkerSize',8,'LineWidth',2)
+title('Density p(x|yellow)')
+hold off
+
+subplot(2,2,3)
+imagesc(X(:),Y(:),p1_x);
+set(gca,'YDir','normal')
+colorbar
+title('Posterior Probability p(red|x)')
+hold on
+plot(mix.centres(:,1),mix.centres(:,2),'b+','MarkerSize',8,'LineWidth',2)
+hold off
+
+subplot(2,2,4)
+imagesc(X(:),Y(:),p2_x);
+set(gca,'YDir','normal')
+colorbar
+title('Posterior Probability p(yellow|x)')
+hold on
+plot(mix.centres(:,1),mix.centres(:,2),'b+','MarkerSize',8,'LineWidth',2)
+hold off
+
+% Now set up and train the MLP
+nhidden=6;
+nout=1;
+alpha = 0.2; % Weight decay
+ncycles = 60; % Number of training cycles.
+% Set up MLP network
+net = mlp(2, nhidden, nout, 'logistic', alpha);
+options = zeros(1,18);
+options(1) = 1; % Print out error values
+options(14) = ncycles;
+
+mlpstring = ['We now set up an MLP with ', num2str(nhidden), ...
+ ' hidden units, logistic output and cross'];
+trainstring = ['entropy error function, and train it for ', ...
+ num2str(ncycles), ' cycles using the'];
+wdstring = ['quasi-Newton optimisation algorithm with weight decay of ', ...
+ num2str(alpha), '.'];
+
+% Force out the figure before training the MLP
+drawnow;
+disp(' ')
+disp('The second figure shows the class conditional densities and posterior')
+disp('probabilities for each class. The blue crosses mark the centres of')
+disp('the three Gaussians.')
+disp(' ')
+disp(mlpstring)
+disp(trainstring)
+disp(wdstring)
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Convert targets to 0-1 encoding
+target=[label==1];
+
+% Train using quasi-Newton.
+[net] = netopt(net, options, data, target, 'quasinew');
+y = mlpfwd(net, data);
+yg = mlpfwd(net, [X(:) Y(:)]);
+yg = reshape(yg(:,1),size(X));
+
+fh3 = figure;
+set(fh3, 'Name', 'Network Output');
+whitebg(fh3, 'k')
+subplot(1, 2, 1)
+hold on
+plot(data((label==1),1),data(label==1,2),'r.', 'MarkerSize', PointSize)
+plot(data((label>1),1),data(label>1,2),'y.', 'MarkerSize', PointSize)
+% Bayesian decision boundary
+[cB, hB] = contour(xrange,yrange,p1_x,[0.5 0.5],'b-');
+[cN, hN] = contour(xrange,yrange,yg,[0.5 0.5],'r-');
+set(hB, 'LineWidth', 2);
+set(hN, 'LineWidth', 2);
+Chandles = [hB(1) hN(1)];
+legend(Chandles, 'Bayes', ...
+ 'Network', 3);
+
+axis([x0 x1 y0 y1])
+set(gca,'Box','on','XTick',[],'YTick',[])
+
+title('Training Data','FontSize',titleSize);
+hold off
+
+subplot(1, 2, 2)
+imagesc(X(:),Y(:),yg);
+colormap hot
+colorbar
+axis(axis)
+set(gca,'YDir','normal','XTick',[],'YTick',[])
+title('Network Output','FontSize',titleSize)
+
+clc
+disp('This figure shows the training data with the decision boundary')
+disp('produced by the trained network and the network''s prediction of')
+disp('the posterior probability of the red class.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+%
+% Now generate and classify a test data set
+%
+[testdata testlabel] = gmmsamp(mix, n);
+testlab=[testlabel==1 testlabel>1];
+
+% This is the Bayesian classification
+tpx_j = gmmpost(mix, testdata);
+Bpost = [tpx_j(:,1), tpx_j(:,2)+tpx_j(:,3)];
+[Bcon Brate]=confmat(Bpost, [testlabel==1 testlabel>1]);
+
+% Compute network classification
+yt = mlpfwd(net, testdata);
+% Convert single output to posteriors for both classes
+testpost = [yt 1-yt];
+[C trate]=confmat(testpost,[testlabel==1 testlabel>1]);
+
+fh4 = figure;
+set(fh4, 'Name', 'Decision Boundaries');
+whitebg(fh4, 'k');
+hold on
+plot(testdata((testlabel==1),1),testdata((testlabel==1),2),...
+ ClassSymbol1, 'MarkerSize', PointSize)
+plot(testdata((testlabel>1),1),testdata((testlabel>1),2),...
+ ClassSymbol2, 'MarkerSize', PointSize)
+% Bayesian decision boundary
+[cB, hB] = contour(xrange,yrange,p1_x,[0.5 0.5],'b-');
+set(hB, 'LineWidth', 2);
+% Network decision boundary
+[cN, hN] = contour(xrange,yrange,yg,[0.5 0.5],'r-');
+set(hN, 'LineWidth', 2);
+Chandles = [hB(1) hN(1)];
+legend(Chandles, 'Bayes decision boundary', ...
+ 'Network decision boundary', -1);
+axis([x0 x1 y0 y1])
+title('Test Data')
+set(gca,'Box','On','Xtick',[],'YTick',[])
+
+clc
+disp('This figure shows the test data with the decision boundary')
+disp('produced by the trained network and the optimal Bayes rule.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+fh5 = figure;
+set(fh5, 'Name', 'Test Set Performance');
+whitebg(fh5, 'w');
+% Bayes rule performance
+subplot(1,2,1)
+plotmat(Bcon,'b','k',12)
+set(gca,'XTick',[0.5 1.5])
+set(gca,'YTick',[0.5 1.5])
+grid('off')
+set(gca,'XTickLabel',['Red ' ; 'Yellow'])
+set(gca,'YTickLabel',['Yellow' ; 'Red '])
+ylabel('True')
+xlabel('Predicted')
+title(['Bayes Confusion Matrix (' num2str(Brate(1)) '%)'])
+
+% Network performance
+subplot(1,2, 2)
+plotmat(C,'b','k',12)
+set(gca,'XTick',[0.5 1.5])
+set(gca,'YTick',[0.5 1.5])
+grid('off')
+set(gca,'XTickLabel',['Red ' ; 'Yellow'])
+set(gca,'YTickLabel',['Yellow' ; 'Red '])
+ylabel('True')
+xlabel('Predicted')
+title(['Network Confusion Matrix (' num2str(trate(1)) '%)'])
+
+disp('The final figure shows the confusion matrices for the')
+disp('two rules on the test set.')
+disp(' ')
+disp('Press any key to exit.')
+pause
+whitebg(fh1, 'w');
+whitebg(fh2, 'w');
+whitebg(fh3, 'w');
+whitebg(fh4, 'w');
+whitebg(fh5, 'w');
+close(fh1); close(fh2); close(fh3);
+close(fh4); close(fh5);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demnlab.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demnlab.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,380 @@
+function demnlab(action);
+%DEMNLAB A front-end Graphical User Interface to the demos
+%
+% Description
+% This function will start a user interface allowing the user to select
+% different demonstration functions to view. The demos are divided into
+% six groups, with the demo being executed by selecting the desired
+% option from a pop-up menu.
+%
+% See also
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% If run without parameters, initialise gui.
+if nargin<1,
+ action='initialise';
+end;
+
+if strcmp(action,'initialise'),
+
+ % Create figure
+ fig = figure( ...
+ 'Name', 'Netlab Demos', ...
+ 'NumberTitle', 'off', ...
+ 'Color', [0.7529 0.7529 0.7529], ...
+ 'Visible', 'on');
+
+ % Create frames that visually GROUP the controls
+ % Bottom of demo buttons
+ group1_bot = 0.20;
+ group1_top = 0.75;
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.03 group1_bot 0.94 group1_top - group1_bot], ...
+ 'BackgroundColor', [0.5 0.5 0.5]);
+
+ % Bottom of help and close buttons
+ group2_bot = 0.04;
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.03 group2_bot 0.94 0.12], ...
+ 'BackgroundColor', [0.5 0.5 0.5]);
+
+ % Draw the Netlab logo in its own axes at the top of the figure
+ hLogoAxis = axes( ...
+ 'Units', 'normalized', ...
+ 'Position', [0.05 0.82 0.90 0.14], ...
+ 'Box', 'off', ...
+ 'XColor', [0 0 0], ...
+ 'YColor', [0 0 0], ...
+ 'Visible', 'on');
+
+ load netlogo; % load image and colour map
+ colormap(netcmap(1:3,:)); % change colour map: don't need many entries
+ image(nlogo); % draw logo
+ axis('image'); % ensures pixels on axis are square
+ axis off; % turn axes off
+
+ % Create static text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.05 group1_top-0.1 0.90 0.08], ...
+ 'String', 'Select demo to run:');
+
+ % First row text offset
+ tRow1Offset = 0.14;
+ % Offset between text and button
+ TBoffset = 0.07;
+ % First row button offset
+ bRow1Offset = tRow1Offset+TBoffset;
+ % ONE text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.08 group1_top-tRow1Offset 0.36 0.05], ...
+ 'String', 'Regression');
+
+ popup1str(1) = {'Select Option'};
+ popup1str(2) = {'Multi-Layer Perceptron'};
+ popup1str(3) = {'Radial Basis Function'};
+ popup1str(4) = {'Mixture Density Network'};
+ % ONE popup
+ hPop1 = uicontrol(fig, ...
+ 'Style','popup', ...
+ 'Units','normalized', ...
+ 'String', popup1str, ...
+ 'Position', [0.08 group1_top-bRow1Offset 0.36 0.08], ...
+ 'BackgroundColor', [0.7 0.7 0.7], ...
+ 'Callback', 'demnlab popup1');
+
+ % TWO text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.56 group1_top-tRow1Offset 0.36 0.05], ...
+ 'String', 'Classification');
+
+ popup2str(1) = popup1str(1);
+ popup2str(2) = {'Generalised Linear Model (2 class)'};
+ popup2str(3) = {'Generalised Linear Model (3 class)'};
+ popup2str(4) = {'Multi-Layer Perceptron'};
+ popup2str(5) = {'K nearest neighbour'};
+ % TWO popup
+ hPop2 = uicontrol(fig, ...
+ 'Style','popup', ...
+ 'Units','normalized', ...
+ 'String', popup2str, ...
+ 'Position', [0.56 group1_top-bRow1Offset 0.36 0.08], ...
+ 'BackgroundColor', [0.7 0.7 0.7], ...
+ 'Callback', 'demnlab popup2');
+
+ tRow2Offset = 0.30;
+ bRow2Offset = tRow2Offset+TBoffset;
+ % THREE text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.08 group1_top - tRow2Offset 0.36 0.05], ...
+ 'String', 'Density Modelling and Clustering');
+
+ popup3str(1) = popup1str(1);
+ popup3str(2) = {'Gaussian Mixture (EM training)'};
+ popup3str(3) = {'Gaussian Mixture (spherical)'};
+ popup3str(4) = {'Gaussian Mixture (diagonal)'};
+ popup3str(5) = {'Gaussian Mixture (full)'};
+ popup3str(6) = {'Neuroscale'};
+ popup3str(7) = {'GTM (EM training)'};
+ popup3str(8) = {'GTM (visualisation)'};
+ popup3str(9) = {'K-means clustering'};
+ popup3str(10) = {'Self-Organising Map'};
+ % Popup menu for the density modelling and clustering demos
+ % THREE popup
+ hPop3 = uicontrol(fig, ...
+ 'Style','popup', ...
+ 'Units','normalized', ...
+ 'String', popup3str, ...
+ 'Position', [0.08 group1_top - bRow2Offset 0.36 0.08], ...
+ 'BackgroundColor', [0.7 0.7 0.7], ...
+ 'Callback', 'demnlab popup3');
+
+ % FOUR text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.56 group1_top - tRow2Offset 0.36 0.05], ...
+ 'String', 'Bayesian Methods');
+
+ popup4str(1) = popup1str(1);
+ popup4str(2) = {'Sampling the MLP Prior'};
+ popup4str(3) = {'Evidence Approximation for MLP'};
+ popup4str(4) = {'Evidence Approximation for RBF'};
+ popup4str(5) = {'Evidence Approximation in Classification'};
+ popup4str(6) = {'ARD for MLP'};
+ popup4str(7) = {'Sampling the GP Prior'};
+ popup4str(8) = {'GPs for Regression'};
+ popup4str(9) = {'ARD for GP'};
+ % FOUR popup
+ hPop4 = uicontrol(fig, ...
+ 'Style','popup', ...
+ 'Units','normalized', ...
+ 'String', popup4str, ...
+ 'Position', [0.56 group1_top - bRow2Offset 0.36 0.08], ...
+ 'BackgroundColor', [0.7 0.7 0.7], ...
+ 'Callback', 'demnlab popup4');
+
+
+ tRow3Offset = 0.45;
+ bRow3Offset = tRow3Offset+TBoffset;
+ % FIVE text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.08 group1_top - tRow3Offset 0.36 0.05], ...
+ 'String', 'Optimisation and Visualisation');
+
+ popup5str(1) = popup1str(1);
+ popup5str(2) = {'Algorithm Comparison'};
+ popup5str(3) = {'On-line Gradient Descent'};
+ popup5str(4) = {'Hinton Diagrams'};
+ % FIVE popup
+ hPop5 = uicontrol(fig, ...
+ 'Style','popup', ...
+ 'Units','normalized', ...
+ 'String',popup5str, ...
+ 'Position', [0.08 group1_top - bRow3Offset 0.36 0.08], ...
+ 'BackgroundColor', [0.7 0.7 0.7], ...
+ 'Callback', 'demnlab popup5');
+
+ % SIX text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Position', [0.56 group1_top - tRow3Offset 0.36 0.05], ...
+ 'String', 'Sampling');
+
+ popup6str(1) = popup1str(1);
+ popup6str(2) = {'Sampling a Gaussian'};
+ popup6str(3) = {'MCMC sampling (Metropolis)'};
+ popup6str(4) = {'Hybrid MC (Gaussian mixture)'};
+ popup6str(5) = {'Hybrid MC for MLP I'};
+ popup6str(6) = {'Hybrid MC for MLP II'};
+ % SIX popup
+ hPop6 = uicontrol(fig, ...
+ 'Style','popup', ...
+ 'Units','normalized', ...
+ 'String', popup6str, ...
+ 'Position', [0.56 group1_top - bRow3Offset 0.36 0.08], ...
+ 'BackgroundColor', [0.7 0.7 0.7], ...
+ 'Callback', 'demnlab popup6');
+
+
+ % Create HELP button
+ uicontrol(fig, ...
+ 'Units', 'normalized', ...
+ 'Position' , [0.05 group2_bot+0.02 0.40 0.08], ...
+ 'String', 'Help', ...
+ 'Callback', 'demnlab help');
+
+ % Create CLOSE button
+ uicontrol(fig, ...
+ 'Units', 'normalized', ...
+ 'Position' , [0.55 group2_bot+0.02 0.40 0.08], ...
+ 'String', 'Close', ...
+ 'Callback', 'close(gcf)');
+
+ hndlList=[fig hPop1 hPop2 hPop3 hPop4 hPop5 hPop6]; % handles shared with the callback branches below
+ set(fig, 'UserData', hndlList);
+ set(fig, 'HandleVisibility', 'callback'); % hide figure from command-line gcf/gca
+
+elseif strcmp(action, 'popup1'),
+
+ hndlList=get(gcf,'UserData');
+ hPop = hndlList(2);
+
+ selected = get(hPop, 'Val');
+ set(hPop, 'Val', [1]); % reset menu to 'Select Option' before running the demo
+
+ switch selected
+ case 2
+ demmlp1;
+ case 3
+ demrbf1;
+ case 4
+ demmdn1;
+ end;
+
+elseif strcmp(action,'popup2'),
+
+ hndlList=get(gcf,'UserData');
+ hPop = hndlList(3);
+
+ selected = get(hPop, 'Val');
+ set(hPop, 'Val', [1]);
+
+ switch selected
+ case 2
+ demglm1;
+ case 3
+ demglm2;
+ case 4
+ demmlp2;
+ case 5
+ demknn1;
+ end
+
+elseif strcmp(action,'popup3'),
+
+ hndlList=get(gcf,'UserData');
+ hPop = hndlList(4);
+
+ selected = get(hPop, 'Val');
+ set(hPop, 'Val', [1]);
+
+ switch selected
+ case 2
+ demgmm1;
+ case 3
+ demgmm2;
+ case 4
+ demgmm3;
+ case 5
+ demgmm4;
+ case 6
+ demns1;
+ case 7
+ demgtm1;
+ case 8
+ demgtm2;
+ case 9
+ demkmn1;
+ case 10
+ demsom1;
+ end
+
+elseif strcmp(action,'popup4'),
+
+ hndlList=get(gcf,'UserData');
+ hPop = hndlList(5);
+
+ selected = get(hPop, 'Val');
+ set(hPop, 'Val', [1]);
+
+ switch selected
+ case 2
+ demprior;
+ case 3
+ demev1;
+ case 4
+ demev3;
+ case 5
+ demev2;
+ case 6
+ demard;
+ case 7
+ demprgp;
+ case 8
+ demgp;
+ case 9
+ demgpard;
+ end
+
+elseif strcmp(action,'popup5'),
+
+ hndlList=get(gcf,'UserData');
+ hPop = hndlList(6);
+
+ selected = get(hPop, 'Val');
+ set(hPop, 'Val', [1]);
+
+ switch selected
+ case 2
+ demopt1;
+ case 3
+ demolgd1;
+ case 4
+ demhint;
+ end
+
+
+elseif strcmp(action,'popup6'),
+
+ hndlList=get(gcf,'UserData');
+ hPop = hndlList(7);
+
+ selected = get(hPop, 'Val');
+ set(hPop, 'Val', [1]);
+
+ switch selected
+ case 2
+ demgauss;
+ case 3
+ demmet1;
+ case 4
+ demhmc1;
+ case 5
+ demhmc2;
+ case 6
+ demhmc3;
+ end
+
+elseif strcmp(action, 'help'),
+
+ helpStr = {'To run a demo, press the appropriate button.'; ...
+ 'Instructions and information will appear in the Matlab';...
+ 'command window.'};
+
+ hHelpDlg = helpdlg(helpStr, 'Netlab Demo Help');
+
+end;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demns1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demns1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,114 @@
+%DEMNS1 Demonstrate Neuroscale for visualisation.
+%
+% Description
+% This script demonstrates the use of the Neuroscale algorithm for
+% topographic projection and visualisation. A data sample is generated
+% from a mixture of two Gaussians in 4d space, and an RBF is trained
+% with the stress error function to project the data into 2d. The
+% training data and a test sample are both plotted in this projection.
+%
+% See also
+% RBF, RBFTRAIN, RBFPRIOR
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Generate the data from a mixture of two spherical Gaussians in 4-d
+% Fix seeds for reproducible results
+rand('state', 420);
+randn('state', 420);
+
+input_dim = 4;
+output_dim = 2;
+mix = gmm(input_dim, 2, 'spherical');
+mix.centres = [1 1 1 1; 0 0 0 0];
+mix.priors = [0.5 0.5];
+mix.covars = [0.1 0.1];
+
+ndata = 60;
+[data, labels] = gmmsamp(mix, ndata);
+
+clc
+disp('This demonstration illustrates the use of the Neuroscale model')
+disp('to perform a topographic projection of data. We begin by generating')
+disp('60 data points from a mixture of two Gaussians in 4 dimensional space.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+ncentres = 10;
+net = rbf(input_dim, ncentres, output_dim, 'tps', 'neuroscale');
+dstring = ['the Sammon mapping. The model has ', num2str(ncentres), ...
+ ' centres, two outputs, and uses'];
+clc
+disp('The Neuroscale model is an RBF with a Stress error measure as used in')
+disp(dstring)
+disp('thin plate spline basis functions.')
+disp(' ')
+disp('It is trained using the shadow targets algorithm for at most 60 iterations.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+% First row controls shadow targets, second row controls rbfsetbf
+options(1, :) = foptions;
+options(2, :) = foptions;
+options(1, 1) = 1;
+options(1, 2) = 1e-2;
+options(1, 3) = 1e-2;
+options(1, 6) = 1; % Switch on PCA initialisation
+options(1, 14) = 60;
+options(2, 1) = -1; % Switch off all warnings
+options(2, 5) = 1;
+options(2, 14) = 10;
+net2 = rbftrain(net, options, data); % shadow targets training: no explicit targets passed
+
+disp(' ')
+disp('After training the model, we project the training data by a normal')
+disp('forward propagation through the RBF network. Because there are two')
+disp('outputs, the results can be plotted and visualised.')
+disp(' ')
+disp('Press any key to continue')
+pause
+
+% Project the training data through the trained RBF and plot the 2-d result
+y = rbffwd(net2, data);
+ClassSymbol1 = 'r.';
+ClassSymbol2 = 'b.';
+PointSize = 12;
+fh1 = figure;
+hold on;
+plot(y((labels==1),1),y(labels==1,2),ClassSymbol1, 'MarkerSize', PointSize)
+plot(y((labels>1),1),y(labels>1,2),ClassSymbol2, 'MarkerSize', PointSize)
+
+disp(' ')
+disp('In this plot, the red dots denote the first class and the blue')
+disp('dots the second class.')
+disp(' ')
+disp('Press any key to continue.')
+disp(' ')
+pause
+
+disp('We now generate a further 100 data points from the original distribution')
+disp('and plot their projection using star symbols. Note that a Sammon')
+disp('mapping cannot be used to generalise to new data in this fashion.')
+
+[test_data, test_labels] = gmmsamp(mix, 100);
+ytest = rbffwd(net2, test_data);
+ClassSymbol1 = 'ro';
+ClassSymbol2 = 'bo';
+% Circles are rather large symbols
+PointSize = 6;
+hold on
+plot(ytest((test_labels==1),1),ytest(test_labels==1,2), ...
+ ClassSymbol1, 'MarkerSize', PointSize)
+plot(ytest((test_labels>1),1),ytest(test_labels>1,2),...
+ ClassSymbol2, 'MarkerSize', PointSize)
+hold on
+legend('Class 1', 'Class 2', 'Test Class 1', 'Test Class 2')
+disp('Press any key to exit.')
+pause
+
+close(fh1);
+clear all;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demolgd1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demolgd1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,115 @@
+%DEMOLGD1 Demonstrate simple MLP optimisation with on-line gradient descent
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T with data generated by sampling X at equal intervals and then
+% generating target data by computing SIN(2*PI*X) and adding Gaussian
+% noise. A 2-layer network with linear outputs is trained by minimizing
+% a sum-of-squares error function using on-line gradient descent.
+%
+% See also
+% DEMMLP1, OLGD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the matrix of inputs x and targets t.
+
+ndata = 20; % Number of data points.
+noise = 0.2; % Standard deviation of noise distribution.
+x = [0:1/(ndata - 1):1]';
+randn('state', 42);
+rand('state', 42);
+t = sin(2*pi*x) + noise*randn(ndata, 1);
+
+clc
+disp('This demonstration illustrates the use of the on-line gradient')
+disp('descent algorithm to train a Multi-Layer Perceptron network for')
+disp('regression problems. It is intended to illustrate the drawbacks')
+disp('of this algorithm compared to more powerful non-linear optimisation')
+disp('algorithms, such as conjugate gradients.')
+disp(' ')
+disp('First we generate the data from a noisy sine function and construct')
+disp('the network.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Set up network parameters.
+nin = 1; % Number of inputs.
+nhidden = 3; % Number of hidden units.
+nout = 1; % Number of outputs.
+alpha = 0.01; % Coefficient of weight-decay prior.
+
+% Create and initialize network weight vector.
+net = mlp(nin, nhidden, nout, 'linear');
+% Initialise weights reasonably close to 0
+net = mlpinit(net, 10);
+
+% Set up vector of options for the optimiser.
+options = foptions;
+options(1) = 1; % This provides display of error values.
+options(14) = 20; % Number of training cycles.
+options(18) = 0.1; % Learning rate
+%options(17) = 0.4; % Momentum
+options(17) = 0.4; % Momentum
+options(5) = 1; % Do randomise pattern order
+clc
+disp('Then we set the options for the training algorithm.')
+disp(['In the first phase of training, which lasts for ',...
+ num2str(options(14)), ' cycles,'])
+disp(['the learning rate is ', num2str(options(18)), ...
+ ' and the momentum is ', num2str(options(17)), '.'])
+disp('The error values are displayed at the end of each pass through the')
+disp('entire pattern set.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Train using online gradient descent
+[net, options] = olgd(net, options, x, t);
+
+% Now allow the learning rate to decay; momentum is kept at 0.4
+options(2) = 0;
+options(3) = 0;
+options(17) = 0.4; % Momentum stays at 0.4 (the disp text below reports this value)
+options(5) = 1; % Randomise pattern order
+options(6) = 1; % Set learning rate decay on
+options(14) = 200;
+options(18) = 0.1; % Initial learning rate
+
+disp(['In the second phase of training, which lasts for up to ',...
+ num2str(options(14)), ' cycles,'])
+disp(['the learning rate starts at ', num2str(options(18)), ...
+ ', decaying at 1/t and the momentum is ', num2str(options(17)), '.'])
+disp(' ')
+disp('Press any key to continue.')
+pause
+[net, options] = olgd(net, options, x, t);
+
+clc
+disp('Now we plot the data, underlying function, and network outputs')
+disp('on a single graph to compare the results.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Plot the data, the original function, and the trained network function.
+plotvals = [0:0.01:1]';
+y = mlpfwd(net, plotvals);
+fh1 = figure;
+plot(x, t, 'ob')
+hold on
+axis([0 1 -1.5 1.5])
+fplot('sin(2*pi*x)', [0 1], '--g')
+plot(plotvals, y, '-r')
+legend('data', 'function', 'network');
+hold off
+
+disp('Note the very poor fit to the data: this should be compared with')
+disp('the results obtained in demmlp1.')
+disp(' ')
+disp('Press any key to exit.')
+pause
+close(fh1);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demopt1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demopt1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,170 @@
+function demopt1(xinit)
+%DEMOPT1 Demonstrate different optimisers on Rosenbrock's function.
+%
+% Description
+% The four general optimisers (quasi-Newton, conjugate gradients,
+% scaled conjugate gradients, and gradient descent) are applied to the
+% minimisation of Rosenbrock's well known `banana' function. Each
+% optimiser is run for at most 100 cycles, and a stopping criterion of
+% 1.0e-4 is used for both position and function value. At the end, the
+% trajectory of each algorithm is shown on a contour plot of the
+% function.
+%
+% DEMOPT1(XINIT) allows the user to specify a row vector with two
+% columns as the starting point. The default is the point [-1 1]. Note
+% that the contour plot has an x range of [-1.5, 1.5] and a y range of
+% [-0.5, 2.1], so it is best to choose a starting point in the same
+% region.
+%
+% See also
+% CONJGRAD, GRADDESC, QUASINEW, SCG, ROSEN, ROSEGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Initialise start point for search
+if nargin < 1 | size(xinit) ~= [1 2] % NOTE(review): element-wise | with a vector comparison; 'if' is true only when ALL elements are nonzero -- confirm behaviour for non-1x2 inputs
+ xinit = [-1 1]; % Traditional start point
+end
+
+% Find out if flops is available (i.e. pre-version 6 Matlab)
+v = version;
+if (str2num(strtok(v, '.')) >= 6)
+ flops_works = logical(0);
+else
+ flops_works = logical(1);
+end
+
+% Set up options
+options = foptions; % Standard options
+options(1) = -1; % Turn off printing completely
+options(3) = 1e-8; % Tolerance in value of function
+options(14) = 100; % Max. 100 iterations of algorithm
+
+clc
+disp('This demonstration compares the performance of four generic')
+disp('optimisation routines when finding the minimum of Rosenbrock''s')
+disp('function y = 100*(x2-x1^2)^2 + (1-x1)^2.')
+disp(' ')
+disp('The global minimum of this function is at [1 1].')
+disp(['Each algorithm starts at the point [' num2str(xinit(1))...
+ ' ' num2str(xinit(2)) '].'])
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+% Generate a contour plot of the function
+a = -1.5:.02:1.5;
+b = -0.5:.02:2.1;
+[A, B] = meshgrid(a, b);
+Z = rosen([A(:), B(:)]);
+Z = reshape(Z, length(b), length(a));
+l = -1:6;
+v = 2.^l; % contour levels at powers of two
+fh1 = figure;
+contour(a, b, Z, v)
+title('Contour plot of Rosenbrock''s function')
+hold on
+
+clc
+disp('We now use quasi-Newton, conjugate gradient, scaled conjugate')
+disp('gradient, and gradient descent with line search algorithms')
+disp('to find a local minimum of this function. Each algorithm is stopped')
+disp('when 100 cycles have elapsed, or if the change in function value')
+disp('is less than 1.0e-8 or the change in the input vector is less than')
+disp('1.0e-4 in magnitude.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+
+clc
+x = xinit;
+flops(0) % reset flop counter (pre-version 6 Matlab; see flops_works above)
+[x, options, errlog, pointlog] = quasinew('rosen', x, options, 'rosegrad');
+fprintf(1, 'For quasi-Newton method:\n')
+fprintf(1, 'Final point is (%f, %f), value is %f\n', x(1), x(2), options(8))
+fprintf(1, 'Number of function evaluations is %d\n', options(10))
+fprintf(1, 'Number of gradient evaluations is %d\n', options(11))
+if flops_works
+ opt_flops = flops;
+ fprintf(1, 'Number of floating point operations is %d\n', opt_flops)
+end
+fprintf(1, 'Number of cycles is %d\n', size(pointlog, 1) - 1);
+disp(' ')
+
+x = xinit;
+flops(0)
+[x, options, errlog2, pointlog2] = conjgrad('rosen', x, options, 'rosegrad');
+fprintf(1, 'For conjugate gradient method:\n')
+fprintf(1, 'Final point is (%f, %f), value is %f\n', x(1), x(2), options(8))
+fprintf(1, 'Number of function evaluations is %d\n', options(10))
+fprintf(1, 'Number of gradient evaluations is %d\n', options(11))
+if flops_works
+ opt_flops = flops;
+ fprintf(1, 'Number of floating point operations is %d\n', ...
+ opt_flops)
+end
+fprintf(1, 'Number of cycles is %d\n', size(pointlog2, 1) - 1);
+disp(' ')
+
+x = xinit;
+flops(0)
+[x, options, errlog3, pointlog3] = scg('rosen', x, options, 'rosegrad');
+fprintf(1, 'For scaled conjugate gradient method:\n')
+fprintf(1, 'Final point is (%f, %f), value is %f\n', x(1), x(2), options(8))
+fprintf(1, 'Number of function evaluations is %d\n', options(10))
+fprintf(1, 'Number of gradient evaluations is %d\n', options(11))
+if flops_works
+ opt_flops = flops;
+ fprintf(1, 'Number of floating point operations is %d\n', opt_flops)
+end
+fprintf(1, 'Number of cycles is %d\n', size(pointlog3, 1) - 1);
+disp(' ')
+
+x = xinit;
+options(7) = 1; % Line minimisation used
+flops(0)
+[x, options, errlog4, pointlog4] = graddesc('rosen', x, options, 'rosegrad');
+fprintf(1, 'For gradient descent method:\n')
+fprintf(1, 'Final point is (%f, %f), value is %f\n', x(1), x(2), options(8))
+fprintf(1, 'Number of function evaluations is %d\n', options(10))
+fprintf(1, 'Number of gradient evaluations is %d\n', options(11))
+if flops_works
+ opt_flops = flops;
+ fprintf(1, 'Number of floating point operations is %d\n', opt_flops)
+end
+fprintf(1, 'Number of cycles is %d\n', size(pointlog4, 1) - 1);
+disp(' ')
+disp('Note that gradient descent does not reach a local minimum in')
+disp('100 cycles.')
+disp(' ')
+disp('On this problem, where the function is cheap to evaluate, the')
+disp('computational effort is dominated by the algorithm overhead.')
+disp('However on more complex optimisation problems (such as those')
+disp('involving neural networks), computational effort is dominated by')
+disp('the number of function and gradient evaluations. Counting these,')
+disp('we can rank the algorithms: quasi-Newton (the best), conjugate')
+disp('gradient, scaled conjugate gradient, gradient descent (the worst)')
+disp(' ')
+disp('Press any key to continue.')
+pause
+clc
+disp('We now plot the trajectory of search points for each algorithm')
+disp('superimposed on the contour plot.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+plot(pointlog4(:,1), pointlog4(:,2), 'bd', 'MarkerSize', 6)
+plot(pointlog3(:,1), pointlog3(:,2), 'mx', 'MarkerSize', 6, 'LineWidth', 2)
+plot(pointlog(:,1), pointlog(:,2), 'k.', 'MarkerSize', 18)
+plot(pointlog2(:,1), pointlog2(:,2), 'g+', 'MarkerSize', 6, 'LineWidth', 2)
+lh = legend( 'Gradient Descent', 'Scaled Conjugate Gradients', ...
+ 'Quasi Newton', 'Conjugate Gradients');
+
+hold off
+
+clc
+disp('Press any key to end.')
+pause
+close(fh1);
+clear all;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/dempot.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/dempot.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function e = dempot(x, mix)
+%DEMPOT Computes the negative log likelihood for a mixture model.
+%
+% Description
+% This function computes the negative log of the unconditional data
+% density P(X) for a Gaussian mixture model. The data structure MIX
+% defines the mixture model, while the matrix X contains the data
+% vectors.
+%
+% See also
+% DEMGPOT, DEMHMC1, DEMMET1
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Potential energy E(x) = -log p(x) under the mixture density 'mix'
+e = -log(gmmprob(mix, x));
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demprgp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demprgp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,542 @@
+function demprgp(action);
+%DEMPRGP Demonstrate sampling from a Gaussian Process prior.
+%
+% Description
+% This function plots the functions represented by a Gaussian Process
+% model. The hyperparameter values can be adjusted on a linear scale
+% using the sliders (though the exponential of the parameters is used
+% in the covariance function), or by typing values into the text boxes
+% and pressing the return key. Both types of covariance function are
+% supported. An extra function specific parameter is needed for the
+% rational quadratic function.
+%
+% See also
+% GP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin<1,
+ action='initialize';
+end;
+
+if strcmp(action,'initialize')
+
+ % Bounds on hyperparameter values
+ biasminval = -3.0; biasmaxval = 3.0;
+ noiseminval = -20; noisemaxval = -2;
+ fparminval = 0.0; fparmaxval = 2.0;
+ inwminval = 0; inwmaxval = 8;
+ % Initial hyperparameter values
+ bias = (biasminval+biasmaxval)/2;
+ noise = (noiseminval+noisemaxval)/2;
+ inweights = (inwminval+inwmaxval)/2;
+ fpar = (fparminval+fparmaxval)/2;
+ fpar2 = (fparminval+fparmaxval)/2;
+
+ gptype = 'sqexp';
+
+ % Create FIGURE
+ fig=figure( ...
+ 'Name','Sampling from a Gaussian Process prior', ...
+ 'Position', [50 50 480 380], ...
+ 'NumberTitle','off', ...
+ 'Color', [0.8 0.8 0.8], ...
+ 'Visible','on');
+
+ % List box for covariance function type
+ nettype_box = uicontrol(fig, ...
+ 'Style', 'listbox', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'center', ...
+ 'Position', [0.52 0.77 0.40 0.12], ...
+ 'String', 'Squared Exponential|Rational Quadratic', ...
+ 'Max', 1, 'Min', 0, ... % Only allow one selection
+ 'Value', 1, ... % Initial value is squared exponential
+ 'BackgroundColor',[0.60 0.60 0.60],...
+ 'CallBack', 'demprgp GPtype');
+
+ % Title for list box
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.52 0.89 0.40 0.05], ...
+ 'String', 'Covariance Function Type', ...
+ 'BackgroundColor', get(fig, 'Color'), ...
+ 'HorizontalAlignment', 'center');
+
+ % Frames to enclose sliders
+ bottom_row = 0.04;
+ slider_frame_height = 0.15;
+ biasframe = uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'String', 'bias', ...
+ 'HorizontalAlignment', 'left', ...
+ 'Position', [0.05 bottom_row 0.35 slider_frame_height]);
+
+ bpos = get(biasframe, 'Position');
+ noise_frame_bottom = bpos(2) + bpos(4) + 0.02;
+ noiseframe = uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 noise_frame_bottom 0.35 slider_frame_height]);
+
+ npos = get(noiseframe, 'Position');
+ inw_frame_bottom = npos(2) + npos(4) + 0.02;
+ inwframe = uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 inw_frame_bottom 0.35 slider_frame_height]);
+
+ inwpos = get(inwframe, 'Position');
+ fpar_frame_bottom = inwpos(2) + inwpos(4) + 0.02;
+ % This frame sometimes has multiple parameters
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 fpar_frame_bottom 0.35 2*slider_frame_height]);
+
+ % Frame text
+ slider_text_height = 0.05;
+ slider_text_voffset = 0.08;
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 bottom_row+slider_text_voffset ...
+ 0.06 slider_text_height], ...
+ 'String', 'bias');
+
+ % Frame text
+ noiseframe = uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 noise_frame_bottom+slider_text_voffset ...
+ 0.08 slider_text_height], ...
+ 'String', 'noise');
+
+ % Frame text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 inw_frame_bottom+slider_text_voffset ...
+ 0.14 slider_text_height], ...
+ 'String', 'inweights');
+
+ % Frame text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 fpar_frame_bottom+slider_frame_height+ ...
+ slider_text_voffset 0.06 slider_text_height], ...
+ 'String', 'fpar');
+
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 fpar_frame_bottom+slider_text_voffset ...
+ 0.06 slider_text_height], ...
+ 'String', 'fpar2', ...
+ 'Tag', 'fpar2text', ...
+ 'Enable', 'off');
+
+ % Slider
+ slider_left = 0.07;
+ slider_width = 0.31;
+ slider_frame_voffset = 0.02;
+ biasslide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', bias, ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [slider_left bottom_row+slider_frame_voffset ...
+ slider_width 0.05], ...
+ 'Min', biasminval, 'Max', biasmaxval, ...
+ 'Callback', 'demprgp update');
+
+ % Slider
+ noiseslide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', noise, ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [slider_left noise_frame_bottom+slider_frame_voffset ...
+ slider_width 0.05], ...
+ 'Min', noiseminval, 'Max', noisemaxval, ...
+ 'Callback', 'demprgp update');
+
+ % Slider
+ inweightsslide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', inweights, ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [slider_left inw_frame_bottom+slider_frame_voffset ...
+ slider_width 0.05], ...
+ 'Min', inwminval, 'Max', inwmaxval, ...
+ 'Callback', 'demprgp update');
+
+ % Slider
+ fparslide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', fpar, ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [slider_left fpar_frame_bottom+slider_frame_height+ ...
+ slider_frame_voffset slider_width 0.05], ...
+ 'Min', fparminval, 'Max', fparmaxval, ...
+ 'Callback', 'demprgp update');
+
+ fpar2slide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', fpar2, ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [slider_left fpar_frame_bottom+slider_frame_voffset ...
+ slider_width 0.05], ...
+ 'Min', fparminval, 'Max', fparmaxval, ...
+ 'Callback', 'demprgp update', ...
+ 'Tag', 'fpar2slider', ...
+ 'Enable', 'off');
+
+ % Text display of hyper-parameter values
+
+ format = '%8f';
+
+ hp_left = 0.20;
+ hp_width = 0.17;
+ biasval = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [hp_left bottom_row+slider_text_voffset ...
+ hp_width slider_text_height], ...
+ 'String', sprintf(format, bias), ...
+ 'Callback', 'demprgp newval');
+
+ noiseval = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [hp_left noise_frame_bottom+slider_text_voffset ...
+ hp_width slider_text_height], ...
+ 'String', sprintf(format, noise), ...
+ 'Callback', 'demprgp newval');
+
+ inweightsval = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [hp_left inw_frame_bottom+slider_text_voffset ...
+ hp_width slider_text_height], ...
+ 'String', sprintf(format, inweights), ...
+ 'Callback', 'demprgp newval');
+
+ fparval = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [hp_left fpar_frame_bottom+slider_frame_height+ ...
+ slider_text_voffset hp_width slider_text_height], ...
+ 'String', sprintf(format, fpar), ...
+ 'Callback', 'demprgp newval');
+
+ fpar2val = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [hp_left fpar_frame_bottom+slider_text_voffset ...
+ hp_width slider_text_height], ...
+ 'String', sprintf(format, fpar), ...
+ 'Callback', 'demprgp newval', ...
+ 'Enable', 'off', ...
+ 'Tag', 'fpar2val');
+
+
+ % The graph box
+ haxes = axes('Position', [0.5 0.28 0.45 0.45], ...
+ 'Units', 'normalized', ...
+ 'Visible', 'on');
+
+ % The SAMPLE button
+ uicontrol(fig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.5 bottom_row 0.13 0.1], ...
+ 'String','Sample', ...
+ 'Callback','demprgp replot');
+
+ % The CLOSE button
+ uicontrol(fig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.82 bottom_row 0.13 0.1], ...
+ 'String','Close', ...
+ 'Callback','close(gcf)');
+
+ % The HELP button
+ uicontrol(fig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.66 bottom_row 0.13 0.1], ...
+ 'String','Help', ...
+ 'Callback','demprgp help');
+
+ % Save handles to objects
+
+ hndlList=[fig biasslide noiseslide inweightsslide fparslide ...
+ biasval noiseval inweightsval ...
+ fparval haxes nettype_box];
+ set(fig, 'UserData', hndlList);
+
+ demprgp('replot')
+
+
+elseif strcmp(action, 'update'),
+
+ % Update when a slider is moved.
+
+ hndlList = get(gcf, 'UserData');
+ biasslide = hndlList(2);
+ noiseslide = hndlList(3);
+ inweightsslide = hndlList(4);
+ fparslide = hndlList(5);
+ biasval = hndlList(6);
+ noiseval = hndlList(7);
+ inweightsval = hndlList(8);
+ fparval = hndlList(9);
+ haxes = hndlList(10);
+ nettype_box = hndlList(11);
+
+
+ bias = get(biasslide, 'Value');
+ noise = get(noiseslide, 'Value');
+ inweights = get(inweightsslide, 'Value');
+ fpar = get(fparslide, 'Value');
+ fpar2 = get(findobj('Tag', 'fpar2slider'), 'Value');
+
+ format = '%8f';
+ set(biasval, 'String', sprintf(format, bias));
+ set(noiseval, 'String', sprintf(format, noise));
+ set(inweightsval, 'String', sprintf(format, inweights));
+ set(fparval, 'String', sprintf(format, fpar));
+ set(findobj('Tag', 'fpar2val'), 'String', ...
+ sprintf(format, fpar2));
+
+ demprgp('replot');
+
+elseif strcmp(action, 'newval'),
+
+ % Update when text is changed.
+
+ hndlList = get(gcf, 'UserData');
+ biasslide = hndlList(2);
+ noiseslide = hndlList(3);
+ inweightsslide = hndlList(4);
+ fparslide = hndlList(5);
+ biasval = hndlList(6);
+ noiseval = hndlList(7);
+ inweightsval = hndlList(8);
+ fparval = hndlList(9);
+ haxes = hndlList(10);
+
+ bias = sscanf(get(biasval, 'String'), '%f');
+ noise = sscanf(get(noiseval, 'String'), '%f');
+ inweights = sscanf(get(inweightsval, 'String'), '%f');
+ fpar = sscanf(get(fparval, 'String'), '%f');
+ fpar2 = sscanf(get(findobj('Tag', 'fpar2val'), 'String'), '%f');
+
+ set(biasslide, 'Value', bias);
+ set(noiseslide, 'Value', noise);
+ set(inweightsslide, 'Value', inweights);
+ set(fparslide, 'Value', fpar);
+ set(findobj('Tag', 'fpar2slider'), 'Value', fpar2);
+
+ demprgp('replot');
+
+elseif strcmp(action, 'GPtype')
+ hndlList = get(gcf, 'UserData');
+ nettype_box = hndlList(11);
+ gptval = get(nettype_box, 'Value');
+ if gptval == 1
+ % Squared exponential, so turn off fpar2
+ set(findobj('Tag', 'fpar2text'), 'Enable', 'off');
+ set(findobj('Tag', 'fpar2slider'), 'Enable', 'off');
+ set(findobj('Tag', 'fpar2val'), 'Enable', 'off');
+ else
+ % Rational quadratic, so turn on fpar2
+ set(findobj('Tag', 'fpar2text'), 'Enable', 'on');
+ set(findobj('Tag', 'fpar2slider'), 'Enable', 'on');
+ set(findobj('Tag', 'fpar2val'), 'Enable', 'on');
+ end
+ demprgp('replot');
+
+elseif strcmp(action, 'replot'),
+
+ % Re-sample from the prior and plot graphs.
+
+ oldFigNumber=watchon;
+
+ hndlList = get(gcf, 'UserData');
+ biasslide = hndlList(2);
+ noiseslide = hndlList(3);
+ inweightsslide = hndlList(4);
+ fparslide = hndlList(5);
+ haxes = hndlList(10);
+ nettype_box = hndlList(11);
+ gptval = get(nettype_box, 'Value');
+ if gptval == 1
+ gptype = 'sqexp';
+ else
+ gptype = 'ratquad';
+ end
+
+ bias = get(biasslide, 'Value');
+ noise = get(noiseslide, 'Value');
+ inweights = get(inweightsslide, 'Value');
+ fpar = get(fparslide, 'Value');
+
+
+ axes(haxes);
+ cla
+ set(gca, ...
+ 'Box', 'on', ...
+ 'Color', [0 0 0], ...
+ 'XColor', [0 0 0], ...
+ 'YColor', [0 0 0], ...
+ 'FontSize', 14);
+ ymin = -10;
+ ymax = 10;
+ axis([-1 1 ymin ymax]);
+ set(gca,'DefaultLineLineWidth', 2);
+
+ xvals = (-1:0.01:1)';
+ nsample = 10; % Number of samples from prior.
+ hold on
+ plot([-1 0; 1 0], [0 ymin; 0 ymax], 'b--');
+ net = gp(1, gptype);
+ net.bias = bias;
+ net.noise = noise;
+ net.inweights = inweights;
+ if strcmp(gptype, 'sqexp')
+ net.fpar = fpar;
+ else
+ fpar2 = get(findobj('Tag', 'fpar2slider'), 'Value');
+ net.fpar = [fpar fpar2];
+ end
+ cn = gpcovar(net, xvals);
+ cninv = inv(cn);
+ cnchol = chol(cn);
+ set(gca, 'DefaultLineLineWidth', 1);
+ for n = 1:nsample
+ y = (cnchol') * randn(size(xvals));
+ plot(xvals, y, 'y');
+ end
+
+ watchoff(oldFigNumber);
+
+elseif strcmp(action, 'help'),
+
+ % Provide help to user.
+
+ oldFigNumber=watchon;
+
+ helpfig = figure('Position', [100 100 480 400], ...
+ 'Name', 'Help', ...
+ 'NumberTitle', 'off', ...
+ 'Color', [0.8 0.8 0.8], ...
+ 'Visible','on');
+
+ % The HELP TITLE BAR frame
+ uicontrol(helpfig, ...
+ 'Style','frame', ...
+ 'Units','normalized', ...
+ 'HorizontalAlignment', 'center', ...
+ 'Position', [0.05 0.82 0.9 0.1], ...
+ 'BackgroundColor',[0.60 0.60 0.60]);
+
+ % The HELP TITLE BAR text
+ uicontrol(helpfig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.26 0.85 0.6 0.05], ...
+ 'HorizontalAlignment', 'left', ...
+ 'String', 'Help: Sampling from a Gaussian Process Prior');
+
+ helpstr1 = strcat(...
+ 'This demonstration shows the effects of sampling from a Gaussian', ...
+ ' process prior. The parameters bias, noise, inweights and fpar', ...
+ ' control the corresponding terms in the covariance function of the',...
+ ' Gaussian process. Their values can be adjusted on a linear scale',...
+ ' using the sliders, or by typing values into the text boxes and',...
+ ' pressing the return key. After setting these values, press the',...
+ ' ''Sample'' button to see a new sample from the prior.');
+
+ helpstr2 = strcat(...
+ 'Observe how inweights controls horizontal length-scale of the',...
+ ' variation in the functions, noise controls the roughness of the',...
+ ' functions, and the bias controls the size of the', ...
+ ' vertical offset of the signal.');
+ helpstr3 = strcat(...
+ 'There are two types of covariance function supported by', ...
+ ' Netlab which can be selected using the ''Covariance Function', ...
+ ' Type'' menu.');
+ helpstr4 = strcat(...
+ 'The squared exponential has a single fpar which', ...
+ ' controls the vertical scale of the process.');
+ helpstr5 = strcat(...
+ 'The rational quadratic has two fpar values. The first is', ...
+ ' a scale parameter inside the rational function like the',...
+ ' first fpar for the squared exponential covariance, while the', ...
+ ' second gives the exponent of the rational function (i.e. the',...
+ ' rate of decay of the covariance function).');
+ % Set up cell array with help strings
+ hstr(1) = {helpstr1};
+ hstr(2) = {''};
+ hstr(3) = {helpstr2};
+ hstr(4) = {''};
+ hstr(5) = {helpstr3};
+ hstr(6) = {''};
+ hstr(7) = {helpstr4};
+ hstr(8) = {''};
+ hstr(9) = {helpstr5};
+
+ % The HELP text
+ helpui = uicontrol(helpfig, ...
+ 'Style', 'Text', ...
+ 'Units', 'normalized', ...
+ 'ForegroundColor', [0 0 0], ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [1 1 1], ...
+ 'Min', 0, ...
+ 'Max', 2, ...
+ 'Position', [0.05 0.2 0.9 0.57]);
+ [hstrw, newpos] = textwrap(helpui, hstr);
+ set(helpui, 'String', hstrw, 'Position', [0.05, 0.2, 0.9 newpos(4)]);
+
+ % The CLOSE button
+ uicontrol(helpfig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.4 0.05 0.2 0.1], ...
+ 'String','Close', ...
+ 'Callback','close(gcf)');
+
+ watchoff(oldFigNumber);
+
+end;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demprior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demprior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,406 @@
+function demprior(action);
+%DEMPRIOR Demonstrate sampling from a multi-parameter Gaussian prior.
+%
+% Description
+% This function plots the functions represented by a multi-layer
+% perceptron network when the weights are set to values drawn from a
+% Gaussian prior distribution. The parameters AW1, AB1 AW2 and AB2
+% control the inverse variances of the first-layer weights, the hidden
+% unit biases, the second-layer weights and the output unit biases
+% respectively. Their values can be adjusted on a logarithmic scale
+% using the sliders, or by typing values into the text boxes and
+% pressing the return key.
+%
+% See also
+% MLP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin<1,
+ action='initialize';
+end;
+
+if strcmp(action,'initialize')
+
+ aw1 = 0.01;
+ ab1 = 0.1;
+ aw2 = 1.0;
+ ab2 = 1.0;
+
+ % Create FIGURE
+ fig=figure( ...
+ 'Name','Sampling from a Gaussian prior', ...
+ 'Position', [50 50 480 380], ...
+ 'NumberTitle','off', ...
+ 'Color', [0.8 0.8 0.8], ...
+ 'Visible','on');
+
+ % The TITLE BAR frame
+ uicontrol(fig, ...
+ 'Style','frame', ...
+ 'Units','normalized', ...
+ 'HorizontalAlignment', 'center', ...
+ 'Position', [0.5 0.82 0.45 0.1], ...
+ 'BackgroundColor',[0.60 0.60 0.60]);
+
+ % The TITLE BAR text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.54 0.85 0.40 0.05], ...
+ 'HorizontalAlignment', 'left', ...
+ 'String', 'Sampling from a Gaussian prior');
+
+ % Frames to enclose sliders
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 0.08 0.35 0.18]);
+
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 0.3 0.35 0.18]);
+
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 0.52 0.35 0.18]);
+
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.05 0.74 0.35 0.18]);
+
+ % Frame text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 0.17 0.06 0.07], ...
+ 'String', 'aw1');
+
+ % Frame text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 0.39 0.06 0.07], ...
+ 'String', 'ab1');
+
+ % Frame text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 0.61 0.06 0.07], ...
+ 'String', 'aw2');
+
+ % Frame text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.07 0.83 0.06 0.07], ...
+ 'String', 'ab2');
+
+ % Slider
+ minval = -5; maxval = 5;
+ aw1slide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', log10(aw1), ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [0.07 0.1 0.31 0.05], ...
+ 'Min', minval, 'Max', maxval, ...
+ 'Callback', 'demprior update');
+
+ % Slider
+ ab1slide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', log10(ab1), ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [0.07 0.32 0.31 0.05], ...
+ 'Min', minval, 'Max', maxval, ...
+ 'Callback', 'demprior update');
+
+ % Slider
+ aw2slide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', log10(aw2), ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [0.07 0.54 0.31 0.05], ...
+ 'Min', minval, 'Max', maxval, ...
+ 'Callback', 'demprior update');
+
+ % Slider
+ ab2slide = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Value', log10(ab2), ...
+ 'BackgroundColor', [0.8 0.8 0.8], ...
+ 'Position', [0.07 0.76 0.31 0.05], ...
+ 'Min', minval, 'Max', maxval, ...
+ 'Callback', 'demprior update');
+
+ % The graph box
+ haxes = axes('Position', [0.5 0.28 0.45 0.45], ...
+ 'Units', 'normalized', ...
+ 'Visible', 'on');
+
+ % Text display of hyper-parameter values
+
+ format = '%8f';
+
+ aw1val = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.15 0.17 0.23 0.07], ...
+ 'String', sprintf(format, aw1), ...
+ 'Callback', 'demprior newval');
+
+ ab1val = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.15 0.39 0.23 0.07], ...
+ 'String', sprintf(format, ab1), ...
+ 'Callback', 'demprior newval');
+
+ aw2val = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.15 0.61 0.23 0.07], ...
+ 'String', sprintf(format, aw2), ...
+ 'Callback', 'demprior newval');
+
+ ab2val = uicontrol(fig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.15 0.83 0.23 0.07], ...
+ 'String', sprintf(format, ab2), ...
+ 'Callback', 'demprior newval');
+
+ % The SAMPLE button
+ uicontrol(fig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.5 0.08 0.13 0.1], ...
+ 'String','Sample', ...
+ 'Callback','demprior replot');
+
+ % The CLOSE button
+ uicontrol(fig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.82 0.08 0.13 0.1], ...
+ 'String','Close', ...
+ 'Callback','close(gcf)');
+
+ % The HELP button
+ uicontrol(fig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.66 0.08 0.13 0.1], ...
+ 'String','Help', ...
+ 'Callback','demprior help');
+
+ % Save handles to objects
+
+ hndlList=[fig aw1slide ab1slide aw2slide ab2slide aw1val ab1val aw2val ...
+ ab2val haxes];
+ set(fig, 'UserData', hndlList);
+
+ demprior('replot')
+
+
+elseif strcmp(action, 'update'),
+
+ % Update when a slider is moved.
+
+ hndlList = get(gcf, 'UserData');
+ aw1slide = hndlList(2);
+ ab1slide = hndlList(3);
+ aw2slide = hndlList(4);
+ ab2slide = hndlList(5);
+ aw1val = hndlList(6);
+ ab1val = hndlList(7);
+ aw2val = hndlList(8);
+ ab2val = hndlList(9);
+ haxes = hndlList(10);
+
+ aw1 = 10^get(aw1slide, 'Value');
+ ab1 = 10^get(ab1slide, 'Value');
+ aw2 = 10^get(aw2slide, 'Value');
+ ab2 = 10^get(ab2slide, 'Value');
+
+ format = '%8f';
+ set(aw1val, 'String', sprintf(format, aw1));
+ set(ab1val, 'String', sprintf(format, ab1));
+ set(aw2val, 'String', sprintf(format, aw2));
+ set(ab2val, 'String', sprintf(format, ab2));
+
+ demprior('replot');
+
+elseif strcmp(action, 'newval'),
+
+ % Update when text is changed.
+
+ hndlList = get(gcf, 'UserData');
+ aw1slide = hndlList(2);
+ ab1slide = hndlList(3);
+ aw2slide = hndlList(4);
+ ab2slide = hndlList(5);
+ aw1val = hndlList(6);
+ ab1val = hndlList(7);
+ aw2val = hndlList(8);
+ ab2val = hndlList(9);
+ haxes = hndlList(10);
+
+ aw1 = sscanf(get(aw1val, 'String'), '%f');
+ ab1 = sscanf(get(ab1val, 'String'), '%f');
+ aw2 = sscanf(get(aw2val, 'String'), '%f');
+ ab2 = sscanf(get(ab2val, 'String'), '%f');
+
+ set(aw1slide, 'Value', log10(aw1));
+ set(ab1slide, 'Value', log10(ab1));
+ set(aw2slide, 'Value', log10(aw2));
+ set(ab2slide, 'Value', log10(ab2));
+
+ demprior('replot');
+
+elseif strcmp(action, 'replot'),
+
+ % Re-sample from the prior and plot graphs.
+
+ oldFigNumber=watchon;
+
+ hndlList = get(gcf, 'UserData');
+ aw1slide = hndlList(2);
+ ab1slide = hndlList(3);
+ aw2slide = hndlList(4);
+ ab2slide = hndlList(5);
+ haxes = hndlList(10);
+
+ aw1 = 10^get(aw1slide, 'Value');
+ ab1 = 10^get(ab1slide, 'Value');
+ aw2 = 10^get(aw2slide, 'Value');
+ ab2 = 10^get(ab2slide, 'Value');
+
+ axes(haxes);
+ cla
+ set(gca, ...
+ 'Box', 'on', ...
+ 'Color', [0 0 0], ...
+ 'XColor', [0 0 0], ...
+ 'YColor', [0 0 0], ...
+ 'FontSize', 14);
+ axis([-1 1 -10 10]);
+ set(gca,'DefaultLineLineWidth', 2);
+
+ nhidden = 12;
+ prior = mlpprior(1, nhidden, 1, aw1, ab1, aw2, ab2);
+ xvals = -1:0.005:1;
+ nsample = 10; % Number of samples from prior.
+ hold on
+ plot([-1 0; 1 0], [0 -10; 0 10], 'b--');
+ net = mlp(1, nhidden, 1, 'linear', prior);
+ for i = 1:nsample
+ net = mlpinit(net, prior);
+ yvals = mlpfwd(net, xvals');
+ plot(xvals', yvals, 'y');
+ end
+
+ watchoff(oldFigNumber);
+
+elseif strcmp(action, 'help'),
+
+ % Provide help to user.
+
+ oldFigNumber=watchon;
+
+ helpfig = figure('Position', [100 100 480 400], ...
+ 'Name', 'Help', ...
+ 'NumberTitle', 'off', ...
+ 'Color', [0.8 0.8 0.8], ...
+ 'Visible','on');
+
+ % The HELP TITLE BAR frame
+ uicontrol(helpfig, ...
+ 'Style','frame', ...
+ 'Units','normalized', ...
+ 'HorizontalAlignment', 'center', ...
+ 'Position', [0.05 0.82 0.9 0.1], ...
+ 'BackgroundColor',[0.60 0.60 0.60]);
+
+ % The HELP TITLE BAR text
+ uicontrol(helpfig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position', [0.26 0.85 0.6 0.05], ...
+ 'HorizontalAlignment', 'left', ...
+ 'String', 'Help: Sampling from a Gaussian Prior');
+
+ helpstr1 = strcat( ...
+ 'This demonstration shows the effects of sampling from a Gaussian', ...
+ ' prior over weights for a two-layer feed-forward network. The', ...
+ ' parameters aw1, ab1, aw2 and ab2 control the inverse variances of', ...
+ ' the first-layer weights, the hidden unit biases, the second-layer', ...
+ ' weights and the output unit biases respectively. Their values can', ...
+ ' be adjusted on a logarithmic scale using the sliders, or by', ...
+ ' typing values into the text boxes and pressing the return key.', ...
+ ' After setting these values, press the ''Sample'' button to see a', ...
+ ' new sample from the prior. ');
+ helpstr2 = strcat( ...
+ 'Observe how aw1 controls the horizontal length-scale of the', ...
+ ' variation in the functions, ab1 controls the input range over', ...
+ ' which such variations occur, aw2 sets the vertical scale of the output', ...
+ ' and ab2 sets the vertical off-set of the output. The network has', ...
+ ' 12 hidden units. ');
+ hstr(1) = {helpstr1};
+ hstr(2) = {''};
+ hstr(3) = {helpstr2};
+
+ % The HELP text
+ helpui = uicontrol(helpfig, ...
+ 'Style', 'edit', ...
+ 'Units', 'normalized', ...
+ 'ForegroundColor', [0 0 0], ...
+ 'HorizontalAlignment', 'left', ...
+ 'BackgroundColor', [1 1 1], ...
+ 'Min', 0, ...
+ 'Max', 2, ...
+ 'Position', [0.05 0.2 0.9 0.8]);
+
+ [hstrw , newpos] = textwrap(helpui, hstr, 70);
+ set(helpui, 'String', hstrw, 'Position', [0.05, 0.2, 0.9, newpos(4)]);
+
+
+ % The CLOSE button
+ uicontrol(helpfig, ...
+ 'Style','push', ...
+ 'Units','normalized', ...
+ 'BackgroundColor', [0.6 0.6 0.6], ...
+ 'Position',[0.4 0.05 0.2 0.1], ...
+ 'String','Close', ...
+ 'Callback','close(gcf)');
+
+ watchoff(oldFigNumber);
+
+end;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demrbf1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demrbf1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,138 @@
+%DEMRBF1 Demonstrate simple regression using a radial basis function network.
+%
+% Description
+% The problem consists of one input variable X and one target variable
+% T with data generated by sampling X at equal intervals and then
+% generating target data by computing SIN(2*PI*X) and adding Gaussian
+% noise. This data is the same as that used in demmlp1.
+%
+% Three different RBF networks (with different activation functions)
+% are trained in two stages. First, a Gaussian mixture model is trained
+% using the EM algorithm, and the centres of this model are used to set
+% the centres of the RBF. Second, the output weights (and biases) are
+% determined using the pseudo-inverse of the design matrix.
+%
+% See also
+% DEMMLP1, RBF, RBFFWD, GMM, GMMEM
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+% Generate the matrix of inputs x and targets t.
+randn('state', 42);
+rand('state', 42);
+ndata = 20; % Number of data points.
+noise = 0.2; % Standard deviation of noise distribution.
+x = (linspace(0, 1, ndata))';
+t = sin(2*pi*x) + noise*randn(ndata, 1);
+mu = mean(x);
+sigma = std(x);
+tr_in = (x - mu)./(sigma);
+
+clc
+disp('This demonstration illustrates the use of a Radial Basis Function')
+disp('network for regression problems. The data is generated from a noisy')
+disp('sine function.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Set up network parameters.
+nin = 1; % Number of inputs.
+nhidden = 7; % Number of hidden units.
+nout = 1; % Number of outputs.
+
+clc
+disp('We assess the effect of three different activation functions.')
+disp('First we create a network with Gaussian activations.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Create and initialize network weight and parameter vectors.
+net = rbf(nin, nhidden, nout, 'gaussian');
+
+disp('A two-stage training algorithm is used: it uses a small number of')
+disp('iterations of EM to position the centres, and then the pseudo-inverse')
+disp('of the design matrix to find the second layer weights.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+disp('Error values from EM training.')
+% Use fast training method
+options = foptions;
+options(1) = 1; % Display EM training
+options(14) = 10; % number of iterations of EM
+net = rbftrain(net, options, tr_in, t);
+
+disp(' ')
+disp('Press any key to continue.')
+pause
+clc
+disp('The second RBF network has thin plate spline activations.')
+disp('The same centres are used again, so we just need to calculate')
+disp('the second layer weights.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Create a second RBF with thin plate spline functions
+net2 = rbf(nin, nhidden, nout, 'tps');
+
+% Re-use previous centres rather than calling rbftrain again
+net2.c = net.c;
+[y, act2] = rbffwd(net2, tr_in);
+
+% Solve for new output weights and biases from RBF activations
+temp = pinv([act2 ones(ndata, 1)]) * t;
+net2.w2 = temp(1:nhidden, :);
+net2.b2 = temp(nhidden+1, :);
+
+disp('The third RBF network has r^4 log r activations.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Create a third RBF with r^4 log r functions
+net3 = rbf(nin, nhidden, nout, 'r4logr');
+
+% Overwrite weight vector with parameters from first RBF
+net3.c = net.c;
+[y, act3] = rbffwd(net3, tr_in);
+temp = pinv([act3 ones(ndata, 1)]) * t;
+net3.w2 = temp(1:nhidden, :);
+net3.b2 = temp(nhidden+1, :);
+
+disp('Now we plot the data, underlying function, and network outputs')
+disp('on a single graph to compare the results.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+% Plot the data, the original function, and the trained network functions.
+plotvals = [x(1):0.01:x(end)]';
+inputvals = (plotvals-mu)./sigma;
+y = rbffwd(net, inputvals);
+y2 = rbffwd(net2, inputvals);
+y3 = rbffwd(net3, inputvals);
+fh1 = figure;
+
+plot(x, t, 'ob')
+hold on
+xlabel('Input')
+ylabel('Target')
+axis([x(1) x(end) -1.5 1.5])
+[fx, fy] = fplot('sin(2*pi*x)', [x(1) x(end)]);
+plot(fx, fy, '-r', 'LineWidth', 2)
+plot(plotvals, y, '--g', 'LineWidth', 2)
+plot(plotvals, y2, 'k--', 'LineWidth', 2)
+plot(plotvals, y3, '-.c', 'LineWidth', 2)
+legend('data', 'function', 'Gaussian RBF', 'Thin plate spline RBF', ...
+ 'r^4 log r RBF');
+hold off
+
+disp('RBF training errors are');
+disp(['Gaussian ', num2str(rbferr(net, tr_in, t)), ' TPS ', ...
+num2str(rbferr(net2, tr_in, t)), ' R4logr ', num2str(rbferr(net3, tr_in, t))]);
+
+disp(' ')
+disp('Press any key to end.')
+pause
+close(fh1);
+clear all;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demsom1.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demsom1.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,113 @@
+%DEMSOM1 Demonstrate SOM for visualisation.
+%
+% Description
+% This script demonstrates the use of a SOM with a two-dimensional
+% grid to map onto data in two-dimensional space. Both on-line and
+% batch training algorithms are shown.
+%
+% See also
+% SOM, SOMPAK, SOMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+randn('state', 42);
+rand('state', 42);
+nin = 2;
+ndata = 300;
+% Give data an offset so that network has something to learn.
+x = rand(ndata, nin) + ones(ndata, 1)*[1.5 1.5];
+
+clc;
+disp('This demonstration of the SOM, or Kohonen network, shows how the')
+disp('network units after training lie in regions of high data density.')
+disp('First we show the data, which is generated uniformly from a square.')
+disp('Red crosses denote the data and black dots are the initial locations')
+disp('of the SOM units.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+net = som(nin, [8, 7]);
+c1 = sompak(net);
+h1 = figure;
+plot(x(:, 1), x(:, 2), 'r+');
+hold on
+plot(c1(:,1), c1(:, 2), 'k.');
+drawnow; % Force figure to be drawn before training starts
+options = foptions;
+
+% Ordering phase
+options(1) = 1;
+options(14) = 50;
+%options(14) = 5; % Just for testing
+options(18) = 0.9; % Initial learning rate
+options(16) = 0.05; % Final learning rate
+options(17) = 8; % Initial neighbourhood size
+options(15) = 1; % Final neighbourhood size
+
+disp('The SOM network is trained in two phases using an on-line algorithm.')
+disp('Initially the neighbourhood is set to 8 and is then reduced')
+disp('linearly to 1 over the first 50 iterations.')
+disp('Each iteration consists of a pass through the complete')
+disp('dataset, while the weights are adjusted after each pattern.')
+disp('The learning rate is reduced linearly from 0.9 to 0.05.')
+disp('This ordering phase puts the units in a rough grid shape.')
+disp('Blue circles denote the units at the end of this phase.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+net2 = somtrain(net, options, x);
+c2 = sompak(net2);
+plot(c2(:, 1), c2(:, 2), 'bo');
+drawnow;
+
+% Convergence phase
+options(1) = 1;
+options(14) = 400;
+options(18) = 0.05;
+options(16) = 0.01;
+options(17) = 0;
+options(15) = 0;
+
+disp('The second, convergence, phase of learning just updates the winning node.')
+disp('The learning rate is reduced from 0.05 to 0.01 over 400 iterations.')
+disp('Note how the error value does not decrease monotonically; it is')
+disp('difficult to decide when training is complete in a principled way.')
+disp('The units are plotted as green stars.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+net3 = somtrain(net2, options, x);
+c3 = sompak(net3);
+plot(c3(:, 1), c3(:, 2), 'g*');
+drawnow;
+
+% Now try batch training
+options(1) = 1;
+options(6) = 1;
+options(14) = 50;
+options(17) = 3;
+options(15) = 0;
+disp('An alternative approach to the on-line algorithm is a batch update')
+disp('rule. Each unit is updated to be the average weights')
+disp('in a neighbourhood (which reduces from 3 to 0) over 50 iterations.');
+disp('Note how the error is even more unstable at first, though eventually')
+disp('it does converge.')
+disp('The final units are shown as black triangles.')
+disp(' ')
+disp('Press any key to continue.')
+pause
+net4 = somtrain(net, options, x);
+c4 = sompak(net4);
+plot(c4(:, 1), c4(:, 2), 'k^')
+legend('Data', 'Initial weights', 'Weights after ordering', ...
+ 'Weights after convergence', 'Batch weights', 2);
+drawnow;
+
+disp(' ')
+disp('Press any key to end.')
+disp(' ')
+pause
+
+close(h1);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/demtrain.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/demtrain.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,362 @@
+function demtrain(action);
+%DEMTRAIN Demonstrate training of MLP network.
+%
+% Description
+% DEMTRAIN brings up a simple GUI to show the training of an MLP
+% network on classification and regression problems. The user should
+% load in a dataset (which should be in Netlab format: see DATREAD),
+% select the output activation function, the number of cycles and
+% hidden units and then train the network. The scaled conjugate
+% gradient algorithm is used. A graph shows the evolution of the error:
+% the value is shown every MAX(CEIL(ITERATIONS / 50), 5) cycles.
+%
+% Once the network is trained, it is saved to the file MLPTRAIN.NET.
+% The results can then be viewed as a confusion matrix (for
+% classification problems) or a plot of output versus target (for
+% regression problems).
+%
+% See also
+% CONFMAT, DATREAD, MLP, NETOPT, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% If run without parameters, initialise gui.
+if nargin<1,
+ action='initialise';
+end;
+
+% Global variable to reference GUI figure
+global DEMTRAIN_FIG
+% Global array to reference sub-figures for results plots
+global DEMTRAIN_RES_FIGS
+global NUM_DEMTRAIN_RES_FIGS
+
+if strcmp(action,'initialise'),
+
+ file = '';
+ path = '.';
+
+ % Create FIGURE
+ fig = figure( ...
+ 'Name', 'Netlab Demo', ...
+ 'NumberTitle', 'off', ...
+ 'Menubar', 'none', ...
+ 'Color', [0.7529 0.7529 0.7529], ...
+ 'Visible', 'on');
+ % Initialise the globals
+ DEMTRAIN_FIG = fig;
+ DEMTRAIN_RES_FIGS = 0;
+ NUM_DEMTRAIN_RES_FIGS = 0;
+
+ % Create GROUP for buttons
+ uicontrol(fig, ...
+ 'Style', 'frame', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.03 0.08 0.94 0.22], ...
+ 'BackgroundColor', [0.5 0.5 0.5]);
+
+ % Create MAIN axis
+ hMain = axes( ...
+ 'Units', 'normalized', ...
+ 'Position', [0.10 0.5 0.80 0.40], ...
+ 'XColor', [0 0 0], ...
+ 'YColor', [0 0 0], ...
+ 'Visible', 'on');
+
+ % Create static text for FILENAME and PATH
+ hFilename = uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.7529 0.7529 0.7529], ...
+ 'Position', [0.05 0.32 0.90 0.05], ...
+ 'HorizontalAlignment', 'center', ...
+ 'String', 'Please load data file.', ...
+ 'Visible', 'on');
+ hPath = uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'Units', 'normalized', ...
+ 'BackgroundColor', [0.7529 0.7529 0.7529], ...
+ 'Position', [0.05 0.37 0.90 0.05], ...
+ 'HorizontalAlignment', 'center', ...
+ 'String', '', ...
+ 'Visible', 'on');
+
+ % Create NO OF HIDDEN UNITS slider and text
+ hSliderText = uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Units', 'normalized', ...
+ 'Position', [0.27 0.12 0.17 0.04], ...
+ 'HorizontalAlignment', 'right', ...
+ 'String', 'Hidden Units: 5');
+ hSlider = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.45 0.12 0.26 0.04], ...
+ 'String', 'Slider', ...
+ 'Min', 1, 'Max', 25, ...
+ 'Value', 5, ...
+ 'Callback', 'demtrain slider_moved');
+
+ % Create ITERATIONS slider and text
+ hIterationsText = uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Units', 'normalized', ...
+ 'Position', [0.27 0.21 0.17 0.04], ...
+ 'HorizontalAlignment', 'right', ...
+ 'String', 'Iterations: 50');
+ hIterations = uicontrol(fig, ...
+ 'Style', 'slider', ...
+ 'Units', 'normalized', ...
+ 'Position', [0.45 0.21 0.26 0.04], ...
+ 'String', 'Slider', ...
+ 'Min', 10, 'Max', 500, ...
+ 'Value', 50, ...
+ 'Callback', 'demtrain iterations_moved');
+
+ % Create ACTIVATION FUNCTION popup and text
+ uicontrol(fig, ...
+ 'Style', 'text', ...
+ 'BackgroundColor', [0.5 0.5 0.5], ...
+ 'Units', 'normalized', ...
+ 'Position', [0.05 0.20 0.20 0.04], ...
+ 'HorizontalAlignment', 'center', ...
+ 'String', 'Activation Function:');
+ hPopup = uicontrol(fig, ...
+ 'Style', 'popup', ...
+ 'Units', 'normalized', ...
+ 'Position' , [0.05 0.10 0.20 0.08], ...
+ 'String', 'Linear|Logistic|Softmax', ...
+ 'Callback', '');
+
+ % Create MENU
+ hMenu1 = uimenu('Label', 'Load Data file...', 'Callback', '');
+ uimenu(hMenu1, 'Label', 'Select training data file', ...
+ 'Callback', 'demtrain get_ip_file');
+ hMenu2 = uimenu('Label', 'Show Results...', 'Callback', '');
+ uimenu(hMenu2, 'Label', 'Show classification results', ...
+ 'Callback', 'demtrain classify');
+ uimenu(hMenu2, 'Label', 'Show regression results', ...
+ 'Callback', 'demtrain predict');
+
+ % Create START button
+ hStart = uicontrol(fig, ...
+ 'Units', 'normalized', ...
+ 'Position' , [0.75 0.2 0.20 0.08], ...
+ 'String', 'Start Training', ...
+ 'Enable', 'off',...
+ 'Callback', 'demtrain start');
+
+ % Create CLOSE button
+ uicontrol(fig, ...
+ 'Units', 'normalized', ...
+ 'Position' , [0.75 0.1 0.20 0.08], ...
+ 'String', 'Close', ...
+ 'Callback', 'demtrain close');
+
+ % Save handles of important UI objects
+ hndlList = [hSlider hSliderText hFilename hPath hPopup ...
+ hIterations hIterationsText hStart];
+ set(fig, 'UserData', hndlList);
+ % Hide window from command line
+ set(fig, 'HandleVisibility', 'callback');
+
+
+elseif strcmp(action, 'slider_moved'),
+
+ % Slider has been moved.
+
+ hndlList = get(gcf, 'UserData');
+ hSlider = hndlList(1);
+ hSliderText = hndlList(2);
+
+ val = get(hSlider, 'Value');
+ if rem(val, 1) < 0.5, % Force up and down arrows to work!
+ val = ceil(val);
+ else
+ val = floor(val);
+ end;
+ set(hSlider, 'Value', val);
+ set(hSliderText, 'String', ['Hidden Units: ' int2str(val)]);
+
+
+elseif strcmp(action, 'iterations_moved'),
+
+ % Slider has been moved.
+
+ hndlList = get(gcf, 'UserData');
+ hSlider = hndlList(6);
+ hSliderText = hndlList(7);
+
+ val = get(hSlider, 'Value');
+ set(hSliderText, 'String', ['Iterations: ' int2str(val)]);
+
+elseif strcmp(action, 'get_ip_file'),
+
+ % Get data file button pressed.
+
+ hndlList = get(gcf, 'UserData');
+
+ [file, path] = uigetfile('*.dat', 'Get Data File', 50, 50);
+
+ if strcmp(file, '') | file == 0,
+ set(hndlList(3), 'String', 'No data file loaded.');
+ set(hndlList(4), 'String', '');
+ else
+ set(hndlList(3), 'String', file);
+ set(hndlList(4), 'String', path);
+ end;
+
+ % Enable training button
+ set(hndlList(8), 'Enable', 'on');
+
+ set(gcf, 'UserData', hndlList);
+
+elseif strcmp(action, 'start'),
+
+ % Start training
+
+ % Get handles of and values from UI objects
+ hndlList = get(gcf, 'UserData');
+ hSlider = hndlList(1); % No of hidden units
+ hIterations = hndlList(6);
+ iterations = get(hIterations, 'Value');
+
+ hFilename = hndlList(3); % Data file name
+ filename = get(hFilename, 'String');
+
+ hPath = hndlList(4); % Data file path
+ path = get(hPath, 'String');
+
+ hPopup = hndlList(5); % Activation function
+ if get(hPopup, 'Value') == 1,
+ act_fn = 'linear';
+ elseif get(hPopup, 'Value') == 2,
+ act_fn = 'logistic';
+ else
+ act_fn = 'softmax';
+ end;
+ nhidden = get(hSlider, 'Value');
+
+ % Check data file exists
+ if fopen([path '/' filename]) == -1,
+ errordlg('Training data file has not been selected.', 'Error');
+ else
+ % Load data file
+ [x,t,nin,nout,ndata] = datread([path filename]);
+
+ % Call MLPTRAIN function repeatedly, while drawing training graph.
+ figure(DEMTRAIN_FIG);
+ hold on;
+
+ title('Training - please wait.');
+
+ % Create net and find initial error
+ net = mlp(size(x, 2), nhidden, size(t, 2), act_fn);
+ % Initialise network with inverse variance of 10
+ net = mlpinit(net, 10);
+ error = mlperr(net, x, t);
+ % Work out reporting step: should be sufficiently big to let training
+ % algorithm have a chance
+ step = max(ceil(iterations / 50), 5);
+
+ % Refresh and rescale axis.
+ cla;
+ max = error;
+ min = max/10;
+ set(gca, 'YScale', 'log');
+ ylabel('log Error');
+ xlabel('No. iterations');
+ axis([0 iterations min max+1]);
+ iold = 0;
+ errold = error;
+ % Plot circle to show error of last iteration
+ % Setting erase mode to none prevents screen flashing during
+ % training
+ plot(0, error, 'ro', 'EraseMode', 'none');
+ hold on
+ drawnow; % Force redraw
+ for i = step-1:step:iterations,
+ [net, error] = mlptrain(net, x, t, step);
+ % Plot line from last point to new point.
+ line([iold i], [errold error], 'Color', 'r', 'EraseMode', 'none');
+ iold = i;
+ errold = error;
+
+ % If new point off scale, redraw axes.
+ if error > max,
+ max = error;
+ axis([0 iterations min max+1]);
+ end;
+ if error < min
+ min = error/10;
+ axis([0 iterations min max+1]);
+ end
+ % Plot circle to show error of last iteration
+ plot(i, error, 'ro', 'EraseMode', 'none');
+ drawnow; % Force redraw
+ end;
+ save mlptrain.net net
+ zoom on;
+
+ title(['Training complete. Final error=', num2str(error)]);
+
+ end;
+
+elseif strcmp(action, 'close'),
+
+ % Close all the figures we have created
+ close(DEMTRAIN_FIG);
+ for n = 1:NUM_DEMTRAIN_RES_FIGS
+ if ishandle(DEMTRAIN_RES_FIGS(n))
+ close(DEMTRAIN_RES_FIGS(n));
+ end
+ end
+
+elseif strcmp(action, 'classify'),
+
+ if fopen('mlptrain.net') == -1,
+ errordlg('You have not yet trained the network.', 'Error');
+ else
+
+ hndlList = get(gcf, 'UserData');
+ filename = get(hndlList(3), 'String');
+ path = get(hndlList(4), 'String');
+ [x,t,nin,nout,ndata] = datread([path filename]);
+ load mlptrain.net net -mat
+ y = mlpfwd(net, x);
+
+ % Save results figure so that it can be closed later
+ NUM_DEMTRAIN_RES_FIGS = NUM_DEMTRAIN_RES_FIGS + 1;
+ DEMTRAIN_RES_FIGS(NUM_DEMTRAIN_RES_FIGS)=conffig(y,t);
+
+ end;
+
+elseif strcmp(action, 'predict'),
+
+ if fopen('mlptrain.net') == -1,
+ errordlg('You have not yet trained the network.', 'Error');
+ else
+
+ hndlList = get(gcf, 'UserData');
+ filename = get(hndlList(3), 'String');
+ path = get(hndlList(4), 'String');
+ [x,t,nin,nout,ndata] = datread([path filename]);
+ load mlptrain.net net -mat
+ y = mlpfwd(net, x);
+
+ for i = 1:size(y,2),
+ % Save results figure so that it can be closed later
+ NUM_DEMTRAIN_RES_FIGS = NUM_DEMTRAIN_RES_FIGS + 1;
+ DEMTRAIN_RES_FIGS(NUM_DEMTRAIN_RES_FIGS) = figure;
+ hold on;
+ title(['Output no ' num2str(i)]);
+ plot([0 1], [0 1], 'r:');
+ plot(y(:,i),t(:,i), 'o');
+ hold off;
+ end;
+ end;
+
+end;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/dist2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/dist2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function n2 = dist2(x, c)
+%DIST2 Calculates squared distance between two sets of points.
+%
+% Description
+% D = DIST2(X, C) takes two matrices of vectors and calculates the
+% squared Euclidean distance between them. Both matrices must be of
+% the same column dimension. If X has M rows and N columns, and C has
+% L rows and N columns, then the result has M rows and L columns. The
+% (I,J)th entry is the squared distance from the Ith row of X to the
+% Jth row of C.
+%
+% See also
+% GMMACTIV, KMEANS, RBFFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[ndata, dimx] = size(x);
+[ncentres, dimc] = size(c);
+if dimx ~= dimc
+ error('Data dimension does not match dimension of centres')
+end
+
+% Vectorised form of ||x - c||^2 = ||x||^2 + ||c||^2 - 2*x*c'
+n2 = (ones(ncentres, 1) * sum((x.^2)', 1))' + ...
+ ones(ndata, 1) * sum((c.^2)',1) - ...
+ 2.*(x*(c'));
+
+% Rounding errors occasionally cause small negative entries in n2; clamp to zero
+if any(any(n2<0))
+ n2(n2<0) = 0;
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/eigdec.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/eigdec.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function [evals, evec] = eigdec(x, N)
+%EIGDEC Sorted eigendecomposition
+%
+% Description
+% EVALS = EIGDEC(X, N) computes the largest N eigenvalues of the
+% matrix X in descending order. [EVALS, EVEC] = EIGDEC(X, N) also
+% computes the corresponding eigenvectors.
+%
+% See also
+% PCA, PPCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargout == 1
+ evals_only = logical(1);
+else
+ evals_only = logical(0);
+end
+
+if N ~= round(N) | N < 1 | N > size(x, 2)
+ error('Number of PCs must be integer, >0, < dim');
+end
+
+% Find the eigenvalues of the data covariance matrix
+if evals_only
+ % Use eig function as always more efficient than eigs here
+ temp_evals = eig(x);
+else
+ % Use eig function unless fraction of eigenvalues required is tiny
+ if (N/size(x, 2)) > 0.04
+ [temp_evec, temp_evals] = eig(x);
+ else
+ options.disp = 0;
+ [temp_evec, temp_evals] = eigs(x, N, 'LM', options);
+ end
+ temp_evals = diag(temp_evals);
+end
+
+% Eigenvalues nearly always returned in descending order, but just
+% to make sure: sort negated values (i.e. descending) and keep the permutation.
+[evals perm] = sort(-temp_evals);
+evals = -evals(1:N);
+if ~evals_only
+ if evals == temp_evals(1:N)
+ % Originals were in order
+ evec = temp_evec(:, 1:N);
+ return
+ else
+ % Need to reorder the eigenvectors
+ for i=1:N
+ evec(:,i) = temp_evec(:,perm(i));
+ end
+ end
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/errbayes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/errbayes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function [e, edata, eprior] = errbayes(net, edata)
+%ERRBAYES Evaluate Bayesian error function for network.
+%
+% Description
+% E = ERRBAYES(NET, EDATA) takes a network data structure NET together
+% with the data contribution to the error for a set of inputs and targets.
+% It returns the regularised error using any zero mean Gaussian priors
+% on the weights defined in NET.
+%
+% [E, EDATA, EPRIOR] = ERRBAYES(NET, EDATA) additionally returns the
+% data and prior components of the error.
+%
+% See also
+% GLMERR, MLPERR, RBFERR
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Evaluate the data contribution to the error.
+if isfield(net, 'beta')
+ e1 = net.beta*edata; % scale data error by inverse noise variance
+else
+ e1 = edata;
+end
+
+% Evaluate the prior contribution to the error.
+if isfield(net, 'alpha')
+ w = netpak(net);
+ if size(net.alpha) == [1 1]
+ eprior = 0.5*(w*w'); % single isotropic Gaussian prior
+ e2 = eprior*net.alpha;
+ else
+ if (isfield(net, 'mask'))
+ nindx_cols = size(net.index, 2);
+ nmask_rows = size(find(net.mask), 1);
+ index = reshape(net.index(logical(repmat(net.mask, ...
+ 1, nindx_cols))), nmask_rows, nindx_cols);
+ else
+ index = net.index;
+ end
+ eprior = 0.5*(w.^2)*index; % one prior term per weight group
+ e2 = eprior*net.alpha;
+ end
+else
+ eprior = 0;
+ e2 = 0;
+end
+
+e = e1 + e2;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/evidence.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/evidence.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,114 @@
+function [net, gamma, logev] = evidence(net, x, t, num)
+%EVIDENCE Re-estimate hyperparameters using evidence approximation.
+%
+% Description
+% [NET] = EVIDENCE(NET, X, T) re-estimates the hyperparameters ALPHA
+% and BETA by applying Bayesian re-estimation formulae for NUM
+% iterations. The hyperparameter ALPHA can be a simple scalar
+% associated with an isotropic prior on the weights, or can be a vector
+% in which each component is associated with a group of weights as
+% defined by the INDEX matrix in the NET data structure. These more
+% complex priors can be set up for an MLP using MLPPRIOR. Initial
+% values for the iterative re-estimation are taken from the network
+% data structure NET passed as an input argument, while the return
+% argument NET contains the re-estimated values.
+%
+% [NET, GAMMA, LOGEV] = EVIDENCE(NET, X, T, NUM) allows the re-
+% estimation formula to be applied for NUM cycles in which the re-
+% estimated values for the hyperparameters from each cycle are used to
+% re-evaluate the Hessian matrix for the next cycle. The return value
+% GAMMA is the number of well-determined parameters and LOGEV is the
+% log of the evidence.
+%
+% See also
+% MLPPRIOR, NETGRAD, NETHESS, DEMEV1, DEMARD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, '', x, t);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+ndata = size(x, 1);
+if nargin == 3
+ num = 1;
+end
+
+% Extract weights from network
+w = netpak(net);
+
+% Evaluate data-dependent contribution to the Hessian matrix.
+[h, dh] = nethess(w, net, x, t);
+clear h; % To save memory when Hessian is large
+if (~isfield(net, 'beta'))
+ local_beta = 1;
+end
+
+[evec, evl] = eig(dh);
+% Now set the negative eigenvalues to zero.
+evl = evl.*(evl > 0);
+% safe_evl is used to avoid taking log of zero
+safe_evl = evl + eps.*(evl <= 0);
+
+[e, edata, eprior] = neterr(w, net, x, t);
+
+if size(net.alpha) == [1 1]
+ % Form vector of eigenvalues
+ evl = diag(evl);
+ safe_evl = diag(safe_evl);
+else
+ ngroups = size(net.alpha, 1);
+ gams = zeros(1, ngroups);
+ logas = zeros(1, ngroups);
+ % Reconstruct data hessian with negative eigenvalues set to zero.
+ dh = evec*evl*evec';
+end
+
+% Do the re-estimation.
+for k = 1 : num
+ % Re-estimate alpha.
+ if size(net.alpha) == [1 1]
+ % Evaluate number of well-determined parameters.
+ L = evl;
+ if isfield(net, 'beta')
+ L = net.beta*L;
+ end
+ gamma = sum(L./(L + net.alpha));
+ net.alpha = 0.5*gamma/eprior;
+ % Partially evaluate log evidence: only include unmasked weights
+ logev = 0.5*length(w)*log(net.alpha);
+ else
+ hinv = inv(hbayes(net, dh));
+ for m = 1 : ngroups
+ group_nweights = sum(net.index(:, m));
+ gams(m) = group_nweights - ...
+ net.alpha(m)*sum(diag(hinv).*net.index(:,m));
+ net.alpha(m) = real(gams(m)/(2*eprior(m)));
+ % Weight alphas by number of weights in group
+ logas(m) = 0.5*group_nweights*log(net.alpha(m));
+ end
+ gamma = sum(gams, 2);
+ logev = sum(logas);
+ end
+ % Re-estimate beta.
+ if isfield(net, 'beta')
+ net.beta = 0.5*(net.nout*ndata - gamma)/edata;
+ logev = logev + 0.5*ndata*log(net.beta) - 0.5*ndata*log(2*pi);
+ local_beta = net.beta;
+ end
+
+ % Evaluate new log evidence
+ e = errbayes(net, edata);
+ if size(net.alpha) == [1 1]
+ logev = logev - e - 0.5*sum(log(local_beta*safe_evl+net.alpha));
+ else
+ for m = 1:ngroups
+ logev = logev - e - ...
+ 0.5*sum(log(local_beta*(safe_evl*net.index(:, m))+...
+ net.alpha(m)));
+ end
+ end
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/fevbayes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/fevbayes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,59 @@
+function [extra, invhess] = fevbayes(net, y, a, x, t, x_test, invhess)
+%FEVBAYES Evaluate Bayesian regularisation for network forward propagation.
+%
+% Description
+% EXTRA = FEVBAYES(NET, Y, A, X, T, X_TEST) takes a network data
+% structure NET together with a set of hidden unit activations A from
+% test inputs X_TEST, training data inputs X and T and outputs a matrix
+% of extra information EXTRA that consists of error bars (variance) for
+% a regression problem or moderated outputs for a classification
+% problem. The optional argument (and return value) INVHESS is the
+% inverse of the network Hessian computed on the training data inputs
+% and targets. Passing it in avoids recomputing it, which can be a
+% significant saving for large training sets.
+%
+% This is called by network-specific functions such as MLPEVFWD which
+% are needed since the return values (predictions and hidden unit
+% activations) for different network types are in different orders (for
+% good reasons).
+%
+% See also
+% MLPEVFWD, RBFEVFWD, GLMEVFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+w = netpak(net);
+g = netderiv(w, net, x_test);
+if nargin < 7
+ % Need to compute inverse hessian
+ hess = nethess(w, net, x, t);
+ invhess = inv(hess);
+end
+
+ntest = size(x_test, 1);
+var = zeros(ntest, 1);
+for idx = 1:1:net.nout,
+ for n = 1:1:ntest,
+ grad = squeeze(g(n,:,idx));
+ var(n,idx) = grad*invhess*grad';
+ end
+end
+
+switch net.outfn
+ case 'linear'
+ % extra is variance
+ extra = ones(size(var))./net.beta + var;
+ case 'logistic'
+ % extra is moderated output
+ kappa = 1./(sqrt(ones(size(var)) + (pi.*var)./8));
+ extra = 1./(1 + exp(-kappa.*a));
+ case 'softmax'
+ % Use extended Mackay formula; beware that this may not
+ % be very accurate
+ kappa = 1./(sqrt(ones(size(var)) + (pi.*var)./8));
+ temp = exp(kappa.*a);
+ extra = temp./(sum(temp, 2)*ones(1, net.nout));
+ otherwise
+ error(['Unknown activation function ', net.outfn]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gauss.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gauss.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
+function y = gauss(mu, covar, x)
+%GAUSS Evaluate a Gaussian distribution.
+%
+% Description
+%
+% Y = GAUSS(MU, COVAR, X) evaluates a multi-variate Gaussian density
+% in D-dimensions at a set of points given by the rows of the matrix X.
+% The Gaussian density has mean vector MU and covariance matrix COVAR.
+%
+% See also
+% GSAMP, DEMGAUSS
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[n, d] = size(x);
+
+[j, k] = size(covar);
+
+% Check that the covariance matrix is the correct dimension
+if ((j ~= d) | (k ~=d))
+ error('Dimension of the covariance matrix and data should match');
+end
+
+invcov = inv(covar);
+mu = reshape(mu, 1, d); % Ensure that mu is a row vector
+
+x = x - ones(n, 1)*mu; % centre the data on the mean
+fact = sum(((x*invcov).*x), 2); % squared Mahalanobis distance per row
+
+y = exp(-0.5*fact);
+
+y = y./sqrt((2*pi)^d*det(covar)); % Gaussian normalising constant
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gbayes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gbayes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,56 @@
+function [g, gdata, gprior] = gbayes(net, gdata)
+%GBAYES Evaluate gradient of Bayesian error function for network.
+%
+% Description
+% G = GBAYES(NET, GDATA) takes a network data structure NET together
+% with the data contribution to the error gradient for a set of inputs and
+% targets. It returns the regularised error gradient using any zero
+% mean Gaussian priors on the weights defined in NET. In addition, if
+% a MASK is defined in NET, then the entries in G that correspond to
+% weights with a 0 in the mask are removed.
+%
+% [G, GDATA, GPRIOR] = GBAYES(NET, GDATA) additionally returns the data
+% and prior components of the gradient.
+%
+% See also
+% ERRBAYES, GLMGRAD, MLPGRAD, RBFGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Evaluate the data contribution to the gradient.
+if (isfield(net, 'mask'))
+ gdata = gdata(logical(net.mask)); % keep only unmasked gradient entries
+end
+if isfield(net, 'beta')
+ g1 = gdata*net.beta; % scale data gradient by inverse noise variance
+else
+ g1 = gdata;
+end
+
+% Evaluate the prior contribution to the gradient.
+if isfield(net, 'alpha')
+ w = netpak(net);
+ if size(net.alpha) == [1 1]
+ gprior = w; % gradient of 0.5*w*w' is w
+ g2 = net.alpha*gprior;
+ else
+ if (isfield(net, 'mask'))
+ nindx_cols = size(net.index, 2);
+ nmask_rows = size(find(net.mask), 1);
+ index = reshape(net.index(logical(repmat(net.mask, ...
+ 1, nindx_cols))), nmask_rows, nindx_cols);
+ else
+ index = net.index;
+ end
+
+ ngroups = size(net.alpha, 1);
+ gprior = index'.*(ones(ngroups, 1)*w); % one row of w per prior group
+ g2 = net.alpha'*gprior;
+ end
+else
+ gprior = 0;
+ g2 = 0;
+end
+
+g = g1 + g2;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/glm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/glm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,82 @@
+function net = glm(nin, nout, outfunc, prior, beta)
+%GLM Create a generalized linear model.
+%
+% Description
+%
+% NET = GLM(NIN, NOUT, FUNC) takes the number of inputs and outputs for
+% a generalized linear model, together with a string FUNC which
+% specifies the output unit activation function, and returns a data
+% structure NET. The weights are drawn from a zero mean, isotropic
+% Gaussian, with variance scaled by the fan-in of the output units.
+% This makes use of the Matlab function RANDN and so the seed for the
+% random weight initialization can be set using RANDN('STATE', S)
+% where S is the seed value. The optional argument ALPHA sets the
+% inverse variance for the weight initialization.
+%
+% The fields in NET are
+% type = 'glm'
+% nin = number of inputs
+% nout = number of outputs
+% nwts = total number of weights and biases
+% outfn = string describing the output unit activation function:
+% 'linear'
+% 'logistic'
+% 'softmax'
+% w1 = first-layer weight matrix
+% b1 = first-layer bias vector
+%
+% NET = GLM(NIN, NOUT, FUNC, PRIOR), in which PRIOR is a scalar, allows
+% the field NET.ALPHA in the data structure NET to be set,
+% corresponding to a zero-mean isotropic Gaussian prior with inverse
+% variance with value PRIOR. Alternatively, PRIOR can consist of a data
+% structure with fields ALPHA and INDEX, allowing individual Gaussian
+% priors to be set over groups of weights in the network. Here ALPHA is
+% a column vector in which each element corresponds to a separate
+% group of weights, which need not be mutually exclusive. The
+% membership of the groups is defined by the matrix INDEX in which the
+% columns correspond to the elements of ALPHA. Each column has one
+% element for each weight in the matrix, in the order defined by the
+% function GLMPAK, and each element is 1 or 0 according to whether the
+% weight is a member of the corresponding group or not.
+%
+% NET = GLM(NIN, NOUT, FUNC, PRIOR, BETA) also sets the additional
+% field NET.BETA in the data structure NET, where beta corresponds to
+% the inverse noise variance.
+%
+% See also
+% GLMPAK, GLMUNPAK, GLMFWD, GLMERR, GLMGRAD, GLMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.type = 'glm';
+net.nin = nin;
+net.nout = nout;
+net.nwts = (nin + 1)*nout;
+
+outtfns = {'linear', 'logistic', 'softmax'};
+
+if sum(strcmp(outfunc, outtfns)) == 0
+ error('Undefined activation function. Exiting.');
+else
+ net.outfn = outfunc;
+end
+
+if nargin > 3
+ if isstruct(prior)
+ net.alpha = prior.alpha;
+ net.index = prior.index;
+ elseif size(prior) == [1 1]
+ net.alpha = prior;
+ else
+ error('prior must be a scalar or structure');
+ end
+end
+
+net.w1 = randn(nin, nout)/sqrt(nin + 1);
+net.b1 = randn(1, nout)/sqrt(nin + 1);
+
+if nargin == 5
+ net.beta = beta;
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/glmderiv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/glmderiv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,40 @@
+function g = glmderiv(net, x)
+%GLMDERIV Evaluate derivatives of GLM outputs with respect to weights.
+%
+% Description
+% G = GLMDERIV(NET, X) takes a network data structure NET and a matrix
+% of input vectors X and returns a three-index matrix mat{g} whose I,
+% J, K element contains the derivative of network output K with respect
+% to weight or bias parameter J for input pattern I. The ordering of
+% the weight and bias parameters is defined by GLMUNPAK.
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'glm', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+ndata = size(x, 1);
+if isfield(net, 'mask')
+ nwts = size(find(net.mask), 1);
+ mask_array = logical(net.mask)*ones(1, net.nout);
+else
+ nwts = net.nwts;
+end
+g = zeros(ndata, nwts, net.nout);
+
+temp = zeros(net.nwts, net.nout);
+for n = 1:ndata
+ % Weight matrix w1
+ temp(1:(net.nin*net.nout), :) = kron(eye(net.nout), (x(n, :))');
+ % Bias term b1
+ temp(net.nin*net.nout+1:end, :) = eye(net.nout);
+ if isfield(net, 'mask')
+ g(n, :, :) = reshape(temp(find(mask_array)), nwts, net.nout);
+ else
+ g(n, :, :) = temp;
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/glmerr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/glmerr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function [e, edata, eprior, y, a] = glmerr(net, x, t)
+%GLMERR Evaluate error function for generalized linear model.
+%
+% Description
+% E = GLMERR(NET, X, T) takes a generalized linear model data
+% structure NET together with a matrix X of input vectors and a matrix
+% T of target vectors, and evaluates the error function E. The choice
+% of error function corresponds to the output unit activation function.
+% Each row of X corresponds to one input vector and each row of T
+% corresponds to one target vector.
+%
+% [E, EDATA, EPRIOR, Y, A] = GLMERR(NET, X, T) also returns the data
+% and prior components of the total error.
+%
+% [E, EDATA, EPRIOR, Y, A] = GLMERR(NET, X, T) also returns a matrix Y
+% giving the outputs of the models and a matrix A giving the summed
+% inputs to each output unit, where each row corresponds to one
+% pattern.
+%
+% See also
+% GLM, GLMPAK, GLMUNPAK, GLMFWD, GLMGRAD, GLMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'glm', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+[y, a] = glmfwd(net, x);
+
+switch net.outfn
+
+ case 'linear' % Linear outputs
+ edata = 0.5*sum(sum((y - t).^2));
+
+ case 'logistic' % Logistic outputs
+ edata = - sum(sum(t.*log(y) + (1 - t).*log(1 - y)));
+
+ case 'softmax' % Softmax outputs
+ edata = - sum(sum(t.*log(y)));
+
+ otherwise
+ error(['Unknown activation function ', net.outfn]);
+end
+
+[e, edata, eprior] = errbayes(net, edata);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/glmevfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/glmevfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,28 @@
+function [y, extra, invhess] = glmevfwd(net, x, t, x_test, invhess)
+%GLMEVFWD Forward propagation with evidence for GLM
+%
+% Description
+% Y = GLMEVFWD(NET, X, T, X_TEST) takes a network data structure NET
+% together with the input X and target T training data and input test
+% data X_TEST. It returns the normal forward propagation through the
+% network Y together with a matrix EXTRA which consists of error bars
+% (variance) for a regression problem or moderated outputs for a
+% classification problem.
+%
+% The optional argument (and return value) INVHESS is the inverse of
+% the network Hessian computed on the training data inputs and targets.
+% Passing it in avoids recomputing it, which can be a significant
+% saving for large training sets.
+%
+% See also
+% FEVBAYES
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[y, a] = glmfwd(net, x_test);
+if nargin == 4
+ [extra, invhess] = fevbayes(net, y, a, x, t, x_test);
+else
+ [extra, invhess] = fevbayes(net, y, a, x, t, x_test, invhess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/glmfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/glmfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function [y, a] = glmfwd(net, x)
+%GLMFWD Forward propagation through generalized linear model.
+%
+% Description
+% Y = GLMFWD(NET, X) takes a generalized linear model data structure
+% NET together with a matrix X of input vectors, and forward propagates
+% the inputs through the network to generate a matrix Y of output
+% vectors. Each row of X corresponds to one input vector and each row
+% of Y corresponds to one output vector.
+%
+% [Y, A] = GLMFWD(NET, X) also returns a matrix A giving the summed
+% inputs to each output unit, where each row corresponds to one
+% pattern.
+%
+% See also
+% GLM, GLMPAK, GLMUNPAK, GLMERR, GLMGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'glm', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+ndata = size(x, 1);
+
+a = x*net.w1 + ones(ndata, 1)*net.b1;
+
+switch net.outfn
+
+ case 'linear' % Linear outputs
+ y = a;
+
+ case 'logistic' % Logistic outputs
+ % Prevent overflow and underflow: use same bounds as glmerr
+ % Ensure that log(1-y) is computable: need exp(a) > eps
+ maxcut = -log(eps);
+ % Ensure that log(y) is computable
+ mincut = -log(1/realmin - 1);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ y = 1./(1 + exp(-a));
+
+ case 'softmax' % Softmax outputs
+ nout = size(a,2);
+ % Prevent overflow and underflow: use same bounds as glmerr
+ % Ensure that sum(exp(a), 2) does not overflow
+ maxcut = log(realmax) - log(nout);
+ % Ensure that exp(a) > 0
+ mincut = log(realmin);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ temp = exp(a);
+ y = temp./(sum(temp, 2)*ones(1,nout));
+ % Ensure that log(y) is computable
+ y(y errold
+ errold = err;
+ w = wold;
+ options(8) = err;
+ fprintf(1, 'Error has increased: terminating\n')
+ return;
+ end
+ if test & n > 1
+ if (max(abs(w - wold)) < options(2) & abs(err-errold) < options(3))
+ options(8) = err;
+ return;
+ else
+ errold = err;
+ wold = w;
+ end
+ end
+end
+
+options(8) = err;
+if (options(1) >= 0)
+ disp(maxitmess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/glmunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/glmunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function net = glmunpak(net, w)
+%GLMUNPAK Separates weights vector into weight and bias matrices.
+%
+% Description
+% NET = GLMUNPAK(NET, W) takes a glm network data structure NET and a
+% weight vector W, and returns a network data structure identical to
+% the input network, except that the first-layer weight matrix W1 and
+% the first-layer bias vector B1 have been set to the corresponding
+% elements of W.
+%
+% See also
+% GLM, GLMPAK, GLMFWD, GLMERR, GLMGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'glm');
+if ~errstring
+ error(errstring);
+end
+
+if net.nwts ~= length(w)
+ error('Invalid weight vector length')
+end
+
+nin = net.nin;
+nout = net.nout;
+net.w1 = reshape(w(1:nin*nout), nin, nout);
+net.b1 = reshape(w(nin*nout + 1: (nin + 1)*nout), 1, nout);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,113 @@
+function mix = gmm(dim, ncentres, covar_type, ppca_dim)
+%GMM Creates a Gaussian mixture model with specified architecture.
+%
+% Description
+% MIX = GMM(DIM, NCENTRES, COVARTYPE) takes the dimension of the space
+% DIM, the number of centres in the mixture model and the type of the
+% mixture model, and returns a data structure MIX. The mixture model
+% type defines the covariance structure of each component Gaussian:
+% 'spherical' = single variance parameter for each component: stored as a vector
+% 'diag' = diagonal matrix for each component: stored as rows of a matrix
+% 'full' = full matrix for each component: stored as 3d array
+% 'ppca' = probabilistic PCA: stored as principal components (in a 3d array
+% and associated variances and off-subspace noise
+% MIX = GMM(DIM, NCENTRES, COVARTYPE, PPCA_DIM) also sets the
+% dimension of the PPCA sub-spaces: the default value is one.
+%
+% The priors are initialised to equal values summing to one, and the
+% covariances are all the identity matrix (or equivalent). The centres
+% are initialised randomly from a zero mean unit variance Gaussian.
+% This makes use of the MATLAB function RANDN and so the seed for the
+% random weight initialisation can be set using RANDN('STATE', S) where
+% S is the state value.
+%
+% The fields in MIX are
+%
+% type = 'gmm'
+% nin = the dimension of the space
+% ncentres = number of mixture components
+% covartype = string for type of variance model
+% priors = mixing coefficients
+% centres = means of Gaussians: stored as rows of a matrix
+% covars = covariances of Gaussians
+% The additional fields for mixtures of PPCA are
+% U = principal component subspaces
+% lambda = in-space covariances: stored as rows of a matrix
+% The off-subspace noise is stored in COVARS.
+%
+% See also
+% GMMPAK, GMMUNPAK, GMMSAMP, GMMINIT, GMMEM, GMMACTIV, GMMPOST,
+% GMMPROB
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if ncentres < 1
+ error('Number of centres must be greater than zero')
+end
+
+mix.type = 'gmm';
+mix.nin = dim;
+mix.ncentres = ncentres;
+
+vartypes = {'spherical', 'diag', 'full', 'ppca'};
+
+if sum(strcmp(covar_type, vartypes)) == 0
+ error('Undefined covariance type')
+else
+ mix.covar_type = covar_type;
+end
+
+% Make default dimension of PPCA subspaces one.
+if strcmp(covar_type, 'ppca')
+ if nargin < 4
+ ppca_dim = 1;
+ end
+ if ppca_dim > dim
+ error('Dimension of PPCA subspaces must be less than data.')
+ end
+ mix.ppca_dim = ppca_dim;
+end
+
+% Initialise priors to be equal and summing to one
+mix.priors = ones(1,mix.ncentres) ./ mix.ncentres;
+
+% Initialise centres
+mix.centres = randn(mix.ncentres, mix.nin);
+
+% Initialise all the variances to unity
+switch mix.covar_type
+
+case 'spherical'
+ mix.covars = ones(1, mix.ncentres);
+ mix.nwts = mix.ncentres + mix.ncentres*mix.nin + mix.ncentres;
+case 'diag'
+ % Store diagonals of covariance matrices as rows in a matrix
+ mix.covars = ones(mix.ncentres, mix.nin);
+ mix.nwts = mix.ncentres + mix.ncentres*mix.nin + ...
+ mix.ncentres*mix.nin;
+case 'full'
+ % Store covariance matrices in a row vector of matrices
+ mix.covars = repmat(eye(mix.nin), [1 1 mix.ncentres]);
+ mix.nwts = mix.ncentres + mix.ncentres*mix.nin + ...
+ mix.ncentres*mix.nin*mix.nin;
+case 'ppca'
+ % This is the off-subspace noise: make it smaller than
+ % lambdas
+ mix.covars = 0.1*ones(1, mix.ncentres);
+ % Also set aside storage for principal components and
+ % associated variances
+ init_space = eye(mix.nin);
+ init_space = init_space(:, 1:mix.ppca_dim);
+ init_space(mix.ppca_dim+1:mix.nin, :) = ...
+ ones(mix.nin - mix.ppca_dim, mix.ppca_dim);
+ mix.U = repmat(init_space , [1 1 mix.ncentres]);
+ mix.lambda = ones(mix.ncentres, mix.ppca_dim);
+ % Take account of additional parameters
+ mix.nwts = mix.ncentres + mix.ncentres*mix.nin + ...
+ mix.ncentres + mix.ncentres*mix.ppca_dim + ...
+ mix.ncentres*mix.nin*mix.ppca_dim;
+otherwise
+ error(['Unknown covariance type ', mix.covar_type]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gmmactiv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gmmactiv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,77 @@
+function a = gmmactiv(mix, x)
+%GMMACTIV Computes the activations of a Gaussian mixture model.
+%
+% Description
+% This function computes the activations A (i.e. the probability
+% P(X|J) of the data conditioned on each component density) for a
+% Gaussian mixture model. For the PPCA model, each activation is the
+% conditional probability of X given that it is generated by the
+% component subspace. The data structure MIX defines the mixture model,
+% while the matrix X contains the data vectors. Each row of X
+% represents a single vector.
+%
+% See also
+% GMM, GMMPOST, GMMPROB
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check that inputs are consistent
+errstring = consist(mix, 'gmm', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+ndata = size(x, 1);
+a = zeros(ndata, mix.ncentres); % Preallocate matrix
+
+switch mix.covar_type
+
+case 'spherical'
+ % Calculate squared norm matrix, of dimension (ndata, ncentres)
+ n2 = dist2(x, mix.centres);
+
+ % Calculate width factors
+ wi2 = ones(ndata, 1) * (2 .* mix.covars);
+ normal = (pi .* wi2) .^ (mix.nin/2);
+
+ % Now compute the activations
+ a = exp(-(n2./wi2))./ normal;
+
+case 'diag'
+ normal = (2*pi)^(mix.nin/2);
+ s = prod(sqrt(mix.covars), 2);
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j, :));
+ a(:, j) = exp(-0.5*sum((diffs.*diffs)./(ones(ndata, 1) * ...
+ mix.covars(j, :)), 2)) ./ (normal*s(j));
+ end
+
+case 'full'
+ normal = (2*pi)^(mix.nin/2);
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j, :));
+ % Use Cholesky decomposition of covariance matrix to speed computation
+ c = chol(mix.covars(:, :, j));
+ temp = diffs/c;
+ a(:, j) = exp(-0.5*sum(temp.*temp, 2))./(normal*prod(diag(c)));
+ end
+case 'ppca'
+ log_normal = mix.nin*log(2*pi);
+ d2 = zeros(ndata, mix.ncentres);
+ logZ = zeros(1, mix.ncentres);
+ for i = 1:mix.ncentres
+ k = 1 - mix.covars(i)./mix.lambda(i, :);
+ logZ(i) = log_normal + mix.nin*log(mix.covars(i)) - ...
+ sum(log(1 - k));
+ diffs = x - ones(ndata, 1)*mix.centres(i, :);
+ proj = diffs*mix.U(:, :, i);
+ d2(:,i) = (sum(diffs.*diffs, 2) - ...
+ sum((proj.*(ones(ndata, 1)*k)).*proj, 2)) / ...
+ mix.covars(i);
+ end
+ a = exp(-0.5*(d2 + ones(ndata, 1)*logZ));
+otherwise
+ error(['Unknown covariance type ', mix.covar_type]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gmmem.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gmmem.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+function [mix, options, errlog] = gmmem(mix, x, options)
+%GMMEM EM algorithm for Gaussian mixture model.
+%
+% Description
+% [MIX, OPTIONS, ERRLOG] = GMMEM(MIX, X, OPTIONS) uses the Expectation
+% Maximization algorithm of Dempster et al. to estimate the parameters
+% of a Gaussian mixture model defined by a data structure MIX. The
+% matrix X represents the data whose expectation is maximized, with
+% each row corresponding to a vector. The optional parameters have
+% the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument ERRLOG. If OPTIONS(1) is set to 0, then
+% only warning messages are displayed. If OPTIONS(1) is -1, then
+% nothing is displayed.
+%
+% OPTIONS(3) is a measure of the absolute precision required of the
+% error function at the solution. If the change in log likelihood
+% between two steps of the EM algorithm is less than this value, then
+% the function terminates.
+%
+% OPTIONS(5) is set to 1 if a covariance matrix is reset to its
+% original value when any of its singular values are too small (less
+% than MIN_COVAR which has the value eps). With the default value of
+% 0 no action is taken.
+%
+% OPTIONS(14) is the maximum number of iterations; default 100.
+%
+% The optional return value OPTIONS contains the final error value
+% (i.e. data log likelihood) in OPTIONS(8).
+%
+% See also
+% GMM, GMMINIT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check that inputs are consistent
+errstring = consist(mix, 'gmm', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+[ndata, xdim] = size(x);
+
+% Sort out the options
+if (options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+display = options(1);
+store = 0;
+if (nargout > 2)
+ store = 1; % Store the error values to return them
+ errlog = zeros(1, niters);
+end
+test = 0;
+if options(3) > 0.0
+ test = 1; % Test log likelihood for termination
+end
+
+check_covars = 0;
+if options(5) >= 1
+ if display >= 0
+ disp('check_covars is on');
+ end
+ check_covars = 1; % Ensure that covariances don't collapse
+ MIN_COVAR = eps; % Minimum singular value of covariance matrix
+ init_covars = mix.covars;
+end
+
+% Main loop of algorithm
+for n = 1:niters
+
+ % Calculate posteriors based on old parameters
+ [post, act] = gmmpost(mix, x);
+
+ % Calculate error value if needed
+ if (display | store | test)
+ prob = act*(mix.priors)';
+ % Error value is negative log likelihood of data
+ e = - sum(log(prob));
+ if store
+ errlog(n) = e;
+ end
+ if display > 0
+ fprintf(1, 'Cycle %4d Error %11.6f\n', n, e);
+ end
+ if test
+ if (n > 1 & abs(e - eold) < options(3))
+ options(8) = e;
+ return;
+ else
+ eold = e;
+ end
+ end
+ end
+
+ % Adjust the new estimates for the parameters
+ new_pr = sum(post, 1);
+ new_c = post' * x;
+
+ % Now move new estimates to old parameter vectors
+ mix.priors = new_pr ./ ndata;
+
+ mix.centres = new_c ./ (new_pr' * ones(1, mix.nin));
+
+ switch mix.covar_type
+ case 'spherical'
+ n2 = dist2(x, mix.centres);
+ for j = 1:mix.ncentres
+ v(j) = (post(:,j)'*n2(:,j));
+ end
+ mix.covars = ((v./new_pr))./mix.nin;
+ if check_covars
+ % Ensure that no covariance is too small
+ for j = 1:mix.ncentres
+ if mix.covars(j) < MIN_COVAR
+ mix.covars(j) = init_covars(j);
+ end
+ end
+ end
+ case 'diag'
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j,:));
+ mix.covars(j,:) = sum((diffs.*diffs).*(post(:,j)*ones(1, ...
+ mix.nin)), 1)./new_pr(j);
+ end
+ if check_covars
+ % Ensure that no covariance is too small
+ for j = 1:mix.ncentres
+ if min(mix.covars(j,:)) < MIN_COVAR
+ mix.covars(j,:) = init_covars(j,:);
+ end
+ end
+ end
+ case 'full'
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j,:));
+ diffs = diffs.*(sqrt(post(:,j))*ones(1, mix.nin));
+ mix.covars(:,:,j) = (diffs'*diffs)/new_pr(j);
+ end
+ if check_covars
+ % Ensure that no covariance is too small
+ for j = 1:mix.ncentres
+ if min(svd(mix.covars(:,:,j))) < MIN_COVAR
+ mix.covars(:,:,j) = init_covars(:,:,j);
+ end
+ end
+ end
+ case 'ppca'
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j,:));
+ diffs = diffs.*(sqrt(post(:,j))*ones(1, mix.nin));
+ [tempcovars, tempU, templambda] = ...
+ ppca((diffs'*diffs)/new_pr(j), mix.ppca_dim);
+ if length(templambda) ~= mix.ppca_dim
+ error('Unable to extract enough components');
+ else
+ mix.covars(j) = tempcovars;
+ mix.U(:, :, j) = tempU;
+ mix.lambda(j, :) = templambda;
+ end
+ end
+ if check_covars
+ if mix.covars(j) < MIN_COVAR
+ mix.covars(j) = init_covars(j);
+ end
+ end
+ otherwise
+ error(['Unknown covariance type ', mix.covar_type]);
+ end
+end
+
+options(8) = -sum(log(gmmprob(mix, x)));
+if (display >= 0)
+ disp(maxitmess);
+end
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gmminit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gmminit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,94 @@
+function mix = gmminit(mix, x, options)
+%GMMINIT Initialises Gaussian mixture model from data
+%
+% Description
+% MIX = GMMINIT(MIX, X, OPTIONS) uses a dataset X to initialise the
+% parameters of a Gaussian mixture model defined by the data structure
+% MIX. The k-means algorithm is used to determine the centres. The
+% priors are computed from the proportion of examples belonging to each
+% cluster. The covariance matrices are calculated as the sample
+% covariance of the points associated with (i.e. closest to) the
+% corresponding centres. For a mixture of PPCA model, the PPCA
+% decomposition is calculated for the points closest to a given centre.
+% This initialisation can be used as the starting point for training
+% the model using the EM algorithm.
+%
+% See also
+% GMM
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[ndata, xdim] = size(x);
+
+% Check that inputs are consistent
+errstring = consist(mix, 'gmm', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Arbitrary width used if variance collapses to zero: make it 'large' so
+% that centre is responsible for a reasonable number of points.
+GMM_WIDTH = 1.0;
+
+% Use kmeans algorithm to set centres
+options(5) = 1;
+[mix.centres, options, post] = kmeansNetlab(mix.centres, x, options);
+
+% Set priors depending on number of points in each cluster
+cluster_sizes = max(sum(post, 1), 1); % Make sure that no prior is zero
+mix.priors = cluster_sizes/sum(cluster_sizes); % Normalise priors
+
+switch mix.covar_type
+case 'spherical'
+ if mix.ncentres > 1
+ % Determine widths as distance to nearest centre
+ % (or a constant if this is zero)
+ cdist = dist2(mix.centres, mix.centres);
+ cdist = cdist + diag(ones(mix.ncentres, 1)*realmax);
+ mix.covars = min(cdist);
+ mix.covars = mix.covars + GMM_WIDTH*(mix.covars < eps);
+ else
+ % Just use variance of all data points averaged over all
+ % dimensions
+ mix.covars = mean(diag(cov(x)));
+ end
+ case 'diag'
+ for j = 1:mix.ncentres
+ % Pick out data points belonging to this centre
+ c = x(find(post(:, j)),:);
+ diffs = c - (ones(size(c, 1), 1) * mix.centres(j, :));
+ mix.covars(j, :) = sum((diffs.*diffs), 1)/size(c, 1);
+ % Replace small entries by GMM_WIDTH value
+ mix.covars(j, :) = mix.covars(j, :) + GMM_WIDTH.*(mix.covars(j, :) 1
+ label = zeros(n, 1);
+end
+cum_prior = 0; % Cumulative sum of priors
+total_samples = 0; % Cumulative sum of number of sampled points
+for j = 1:mix.ncentres
+ num_samples = sum(priors >= cum_prior & ...
+ priors < cum_prior + mix.priors(j));
+ % Form a full covariance matrix
+ switch mix.covar_type
+ case 'spherical'
+ covar = mix.covars(j) * eye(mix.nin);
+ case 'diag'
+ covar = diag(mix.covars(j, :));
+ case 'full'
+ covar = mix.covars(:, :, j);
+ case 'ppca'
+ covar = mix.covars(j) * eye(mix.nin) + ...
+ mix.U(:, :, j)* ...
+ (diag(mix.lambda(j, :))-(mix.covars(j)*eye(mix.ppca_dim)))* ...
+ (mix.U(:, :, j)');
+ otherwise
+ error(['Unknown covariance type ', mix.covar_type]);
+ end
+ data(total_samples+1:total_samples+num_samples, :) = ...
+ gsamp(mix.centres(j, :), covar, num_samples);
+ if nargout > 1
+ label(total_samples+1:total_samples+num_samples) = j;
+ end
+ cum_prior = cum_prior + mix.priors(j);
+ total_samples = total_samples + num_samples;
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gmmunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gmmunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,54 @@
+function mix = gmmunpak(mix, p)
+%GMMUNPAK Separates a vector of Gaussian mixture model parameters into its components.
+%
+% Description
+% MIX = GMMUNPAK(MIX, P) takes a GMM data structure MIX and a single
+% row vector of parameters P and returns a mixture data structure
+% identical to the input MIX, except that the mixing coefficients
+% PRIORS, centres CENTRES and covariances COVARS (and, for PPCA, the
+% lambdas and U (PCA sub-spaces)) are all set to the corresponding
+% elements of P.
+%
+% See also
+% GMM, GMMPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(mix, 'gmm');
+if ~errstring
+ error(errstring);
+end
+if mix.nwts ~= length(p)
+ error('Invalid weight vector length')
+end
+
+mark1 = mix.ncentres;
+mark2 = mark1 + mix.ncentres*mix.nin;
+
+mix.priors = reshape(p(1:mark1), 1, mix.ncentres);
+mix.centres = reshape(p(mark1 + 1:mark2), mix.ncentres, mix.nin);
+switch mix.covar_type
+ case 'spherical'
+ mark3 = mix.ncentres*(2 + mix.nin);
+ mix.covars = reshape(p(mark2 + 1:mark3), 1, mix.ncentres);
+ case 'diag'
+ mark3 = mix.ncentres*(1 + mix.nin + mix.nin);
+ mix.covars = reshape(p(mark2 + 1:mark3), mix.ncentres, mix.nin);
+ case 'full'
+ mark3 = mix.ncentres*(1 + mix.nin + mix.nin*mix.nin);
+ mix.covars = reshape(p(mark2 + 1:mark3), mix.nin, mix.nin, ...
+ mix.ncentres);
+ case 'ppca'
+ mark3 = mix.ncentres*(2 + mix.nin);
+ mix.covars = reshape(p(mark2 + 1:mark3), 1, mix.ncentres);
+ % Now also extract k and eigenspaces
+ mark4 = mark3 + mix.ncentres*mix.ppca_dim;
+ mix.lambda = reshape(p(mark3 + 1:mark4), mix.ncentres, ...
+ mix.ppca_dim);
+ mix.U = reshape(p(mark4 + 1:end), mix.nin, mix.ppca_dim, ...
+ mix.ncentres);
+ otherwise
+ error(['Unknown covariance type ', mix.covar_type]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,93 @@
+function net = gp(nin, covar_fn, prior)
+%GP Create a Gaussian Process.
+%
+% Description
+%
+% NET = GP(NIN, COVARFN) takes the number of inputs NIN for a Gaussian
+% Process model with a single output, together with a string COVARFN
+% which specifies the type of the covariance function, and returns a
+% data structure NET. The parameters are set to zero.
+%
+% The fields in NET are
+% type = 'gp'
+% nin = number of inputs
+% nout = number of outputs: always 1
+% nwts = total number of weights and covariance function parameters
+% bias = logarithm of constant offset in covariance function
+% noise = logarithm of output noise variance
+% inweights = logarithm of inverse length scale for each input
+% covarfn = string describing the covariance function:
+% 'sqexp'
+% 'ratquad'
+% fpar = covariance function specific parameters (1 for squared exponential,
+% 2 for rational quadratic)
+% trin = training input data (initially empty)
+% trtargets = training target data (initially empty)
+%
+% NET = GP(NIN, COVARFN, PRIOR) sets a Gaussian prior on the parameters
+% of the model. PRIOR must contain the fields PR_MEAN and PR_VARIANCE.
+% If PR_MEAN is a scalar, then the Gaussian is assumed to be isotropic
+% and the additional fields NET.PR_MEAN and PR_VARIANCE are set.
+% Otherwise, the Gaussian prior has a mean defined by a column vector
+% of parameters PRIOR.PR_MEAN and covariance defined by a column vector
+% of parameters PRIOR.PR_VARIANCE. Each element of PRMEAN corresponds
+% to a separate group of parameters, which need not be mutually
+% exclusive. The membership of the groups is defined by the matrix
+% PRIOR.INDEX in which the columns correspond to the elements of
+% PRMEAN. Each column has one element for each weight in the matrix, in
+% the order defined by the function GPPAK, and each element is 1 or 0
+% according to whether the parameter is a member of the corresponding
+% group or not. The additional field NET.INDEX is set in this case.
+%
+% See also
+% GPPAK, GPUNPAK, GPFWD, GPERR, GPCOVAR, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.type = 'gp';
+net.nin = nin;
+net.nout = 1; % Only do single output GP
+
+% Store log parameters
+net.bias = 0;
+net.min_noise = sqrt(eps); % Prevent output noise collapsing completely
+net.noise = 0;
+net.inweights = zeros(1,nin); % Weights on inputs in covariance function
+
+covarfns = {'sqexp', 'ratquad'};
+
+if sum(strcmp(covar_fn, covarfns)) == 0
+ error('Undefined activation function. Exiting.');
+else
+ net.covar_fn = covar_fn;
+end
+
+switch covar_fn
+
+ case 'sqexp' % Squared exponential
+ net.fpar = zeros(1,1); % One function specific parameter
+
+ case 'ratquad' % Rational quadratic
+ net.fpar = zeros(1, 2); % Two function specific parameters
+
+ otherwise
+ error(['Unknown covariance function ', covar_fn]);
+end
+
+net.nwts = 2 + nin + length(net.fpar);
+
+if nargin >= 3
+ if size(prior.pr_mean) == [1 1]
+ net.pr_mean = prior.pr_mean;
+ net.pr_var = prior.pr_var;
+ else
+ net.pr_mean = prior.pr_mean;
+ net.pr_var = prior.pr_var;
+ net.index = prior.index;
+ end
+end
+
+% Store training data as needed for gpfwd
+net.tr_in = [];
+net.tr_targets = [];
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpcovar.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpcovar.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,38 @@
+function [cov, covf] = gpcovar(net, x)
+%GPCOVAR Calculate the covariance for a Gaussian Process.
+%
+% Description
+%
+% COV = GPCOVAR(NET, X) takes a Gaussian Process data structure NET
+% together with a matrix X of input vectors, and computes the
+% covariance matrix COV. The inverse of this matrix is used when
+% calculating the mean and variance of the predictions made by NET.
+%
+% [COV, COVF] = GPCOVAR(NET, X) also generates the covariance matrix
+% due to the covariance function specified by NET.COVARFN as calculated
+% by GPCOVARF.
+%
+% See also
+% GP, GPPAK, GPUNPAK, GPCOVARP, GPCOVARF, GPFWD, GPERR, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'gp', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+ndata = size(x, 1);
+
+% Compute prior covariance
+if nargout >= 2
+ [covp, covf] = gpcovarp(net, x, x);
+else
+ covp = gpcovarp(net, x, x);
+end
+
+% Add output noise variance
+cov = covp + (net.min_noise + exp(net.noise))*eye(ndata);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpcovarf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpcovarf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function covf = gpcovarf(net, x1, x2)
+%GPCOVARF Calculate the covariance function for a Gaussian Process.
+%
+% Description
+%
+% COVF = GPCOVARF(NET, X1, X2) takes a Gaussian Process data structure
+% NET together with two matrices X1 and X2 of input vectors, and
+% computes the matrix of the covariance function values COVF.
+%
+% See also
+% GP, GPCOVAR, GPCOVARP, GPERR, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gp', x1);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if size(x1, 2) ~= size(x2, 2)
+ error('Number of variables in x1 and x2 must be the same');
+end
+
+n1 = size(x1, 1);
+n2 = size(x2, 1);
+beta = diag(exp(net.inweights));
+
+% Compute the weighted squared distances between x1 and x2
+z = (x1.*x1)*beta*ones(net.nin, n2) - 2*x1*beta*x2' ...
+ + ones(n1, net.nin)*beta*(x2.*x2)';
+
+switch net.covar_fn
+
+ case 'sqexp' % Squared exponential
+ covf = exp(net.fpar(1) - 0.5*z);
+
+ case 'ratquad' % Rational quadratic
+ nu = exp(net.fpar(2));
+ covf = exp(net.fpar(1))*((ones(size(z)) + z).^(-nu));
+
+ otherwise
+ error(['Unknown covariance function ', net.covar_fn]);
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpcovarp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpcovarp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [covp, covf] = gpcovarp(net, x1, x2)
+%GPCOVARP Calculate the prior covariance for a Gaussian Process.
+%
+% Description
+%
+% COVP = GPCOVARP(NET, X1, X2) takes a Gaussian Process data structure
+% NET together with two matrices X1 and X2 of input vectors, and
+% computes the matrix of the prior covariance. This is the function
+% component of the covariance plus the exponential of the bias term.
+%
+% [COVP, COVF] = GPCOVARP(NET, X1, X2) also returns the function
+% component of the covariance.
+%
+% See also
+% GP, GPCOVAR, GPCOVARF, GPERR, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gp', x1);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if size(x1, 2) ~= size(x2, 2)
+ error('Number of variables in x1 and x2 must be the same');
+end
+
+covf = gpcovarf(net, x1, x2);
+covp = covf + exp(net.bias);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gperr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gperr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+function [e, edata, eprior] = gperr(net, x, t)
+%GPERR Evaluate error function for Gaussian Process.
+%
+% Description
+% E = GPERR(NET, X, T) takes a Gaussian Process data structure NET
+% together with a matrix X of input vectors and a matrix T of target
+% vectors, and evaluates the error function E. Each row of X
+% corresponds to one input vector and each row of T corresponds to one
+% target vector.
+%
+% [E, EDATA, EPRIOR] = GPERR(NET, X, T) additionally returns the data
+% and hyperprior components of the error, assuming a Gaussian prior on
+% the weights with mean and variance parameters PRMEAN and PRVARIANCE
+% taken from the network data structure NET.
+%
+% See also
+% GP, GPCOVAR, GPFWD, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+cn = gpcovar(net, x);
+
+edata = 0.5*(sum(log(eig(cn, 'nobalance'))) + t'*inv(cn)*t);
+
+% Evaluate the hyperprior contribution to the error.
+% The hyperprior is Gaussian with mean pr_mean and variance
+% pr_variance
+if isfield(net, 'pr_mean')
+ w = gppak(net);
+ m = repmat(net.pr_mean, size(w));
+ if size(net.pr_mean) == [1 1]
+ eprior = 0.5*((w-m)*(w-m)');
+ e2 = eprior/net.pr_var;
+ else
+ wpr = repmat(w, size(net.pr_mean, 1), 1)';
+ eprior = 0.5*(((wpr - m').^2).*net.index);
+ e2 = (sum(eprior, 1))*(1./net.pr_var);
+ end
+else
+ e2 = 0;
+ eprior = 0;
+end
+
+e = edata + e2;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+function [y, sigsq] = gpfwd(net, x, cninv)
+%GPFWD Forward propagation through Gaussian Process.
+%
+% Description
+% Y = GPFWD(NET, X) takes a Gaussian Process data structure NET
+% together with a matrix X of input vectors, and forward propagates
+% the inputs through the model to generate a matrix Y of output
+% vectors. Each row of X corresponds to one input vector and each row
+% of Y corresponds to one output vector. This assumes that the
+% training data (both inputs and targets) has been stored in NET by a
+% call to GPINIT; these are needed to compute the training data
+% covariance matrix.
+%
+% [Y, SIGSQ] = GPFWD(NET, X) also generates a column vector SIGSQ of
+% conditional variances (or squared error bars) where each value
+% corresponds to a pattern.
+%
+% [Y, SIGSQ] = GPFWD(NET, X, CNINV) uses the pre-computed inverse
+% covariance matrix CNINV in the forward propagation. This increases
+% efficiency if several calls to GPFWD are made.
+%
+% See also
+% GP, DEMGP, GPINIT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gp', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if ~(isfield(net, 'tr_in') & isfield(net, 'tr_targets'))
+ error('Require training inputs and targets');
+end
+
+if nargin == 2
+ % Inverse covariance matrix not supplied.
+ cninv = inv(gpcovar(net, net.tr_in));
+end
+ktest = gpcovarp(net, x, net.tr_in);
+
+% Predict mean
+y = ktest*cninv*net.tr_targets;
+
+if nargout >= 2
+ % Predict error bar
+ ndata = size(x, 1);
+ sigsq = (ones(ndata, 1) * gpcovarp(net, x(1,:), x(1,:))) ...
+ - sum((ktest*cninv).*ktest, 2);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpgrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpgrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,100 @@
+function g = gpgrad(net, x, t)
+%GPGRAD Evaluate error gradient for Gaussian Process.
+%
+% Description
+% G = GPGRAD(NET, X, T) takes a Gaussian Process data structure NET
+% together with a matrix X of input vectors and a matrix T of target
+% vectors, and evaluates the error gradient G. Each row of X
+% corresponds to one input vector and each row of T corresponds to one
+% target vector.
+%
+% See also
+% GP, GPCOVAR, GPFWD, GPERR
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+% Evaluate derivatives with respect to each hyperparameter in turn.
+ndata = size(x, 1);
+[cov, covf] = gpcovar(net, x);
+cninv = inv(cov);
+trcninv = trace(cninv);
+cninvt = cninv*t;
+
+% Function parameters
+switch net.covar_fn
+
+ case 'sqexp' % Squared exponential
+ gfpar = trace(cninv*covf) - cninvt'*covf*cninvt;
+
+ case 'ratquad' % Rational quadratic
+ beta = diag(exp(net.inweights));
+ gfpar(1) = trace(cninv*covf) - cninvt'*covf*cninvt;
+ D2 = (x.*x)*beta*ones(net.nin, ndata) - 2*x*beta*x' ...
+ + ones(ndata, net.nin)*beta*(x.*x)';
+ E = ones(size(D2));
+ L = - exp(net.fpar(2)) * covf .* log(E + D2); % d(cn)/d(nu)
+ gfpar(2) = trace(cninv*L) - cninvt'*L*cninvt;
+
+ otherwise
+ error(['Unknown covariance function ', net.covar_fn]);
+end
+
+% Bias derivative
+ndata = size(x, 1);
+fac = exp(net.bias)*ones(ndata);
+gbias = trace(cninv*fac) - cninvt'*fac*cninvt;
+
+% Noise derivative
+gnoise = exp(net.noise)*(trcninv - cninvt'*cninvt);
+
+% Input weight derivatives
+if strcmp(net.covar_fn, 'ratquad')
+ F = (exp(net.fpar(2))*E)./(E + D2);
+end
+
+nparams = length(net.inweights);
+for l = 1 : nparams
+ vect = x(:, l);
+ matx = (vect.*vect)*ones(1, ndata) ...
+ - 2.0*vect*vect' ...
+ + ones(ndata, 1)*(vect.*vect)';
+ switch net.covar_fn
+ case 'sqexp' % Squared exponential
+ dmat = -0.5*exp(net.inweights(l))*covf.*matx;
+
+ case 'ratquad' % Rational quadratic
+ dmat = - exp(net.inweights(l))*covf.*matx.*F;
+ otherwise
+ error(['Unknown covariance function ', net.covar_fn]);
+ end
+
+ gw1(l) = trace(cninv*dmat) - cninvt'*dmat*cninvt;
+end
+
+g1 = [gbias, gnoise, gw1, gfpar];
+g1 = 0.5*g1;
+
+% Evaluate the prior contribution to the gradient.
+if isfield(net, 'pr_mean')
+ w = gppak(net);
+ m = repmat(net.pr_mean, size(w));
+ if size(net.pr_mean) == [1 1]
+ gprior = w - m;
+ g2 = gprior/net.pr_var;
+ else
+ ngroups = size(net.pr_mean, 1);
+ gprior = net.index'.*(ones(ngroups, 1)*w - m);
+ g2 = (1./net.pr_var)'*gprior;
+ end
+else
+ gprior = 0;
+ g2 = 0;
+end
+
+g = g1 + g2;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpinit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpinit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function net = gpinit(net, tr_in, tr_targets, prior)
+%GPINIT Initialise Gaussian Process model.
+%
+% Description
+% NET = GPINIT(NET, TRIN, TRTARGETS) takes a Gaussian Process data
+% structure NET together with a matrix TRIN of training input vectors
+% and a matrix TRTARGETS of training target vectors, and stores them
+% in NET. These datasets are required if the corresponding inverse
+% covariance matrix is not supplied to GPFWD. This is important if the
+% data structure is saved and then reloaded before calling GPFWD. Each
+% row of TRIN corresponds to one input vector and each row of TRTARGETS
+% corresponds to one target vector.
+%
+% NET = GPINIT(NET, TRIN, TRTARGETS, PRIOR) additionally initialises
+% the parameters in NET from the PRIOR data structure which contains
+% the mean and variance of the Gaussian distribution which is sampled
+% from.
+%
+% See also
+% GP, GPFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gp', tr_in, tr_targets);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if nargin >= 4
+ % Initialise weights at random
+ if size(prior.pr_mean) == [1 1]
+ w = randn(1, net.nwts).*sqrt(prior.pr_var) + ...
+ repmat(prior.pr_mean, 1, net.nwts);
+ else
+ sig = sqrt(prior.index*prior.pr_var);
+ w = sig'.*randn(1, net.nwts) + (prior.index*prior.pr_mean)';
+ end
+ net = gpunpak(net, w);
+end
+
+net.tr_in = tr_in;
+net.tr_targets = tr_targets;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gppak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gppak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function hp = gppak(net)
+%GPPAK Combines GP hyperparameters into one vector.
+%
+% Description
+% HP = GPPAK(NET) takes a Gaussian Process data structure NET and
+% combines the hyperparameters into a single row vector HP.
+%
+% See also
+% GP, GPUNPAK, GPFWD, GPERR, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'gp');
+if ~isempty(errstring);
+ error(errstring);
+end
+hp = [net.bias, net.noise, net.inweights, net.fpar];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gpunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gpunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,36 @@
+function net = gpunpak(net, hp)
+%GPUNPAK Separates hyperparameter vector into components.
+%
+% Description
+% NET = GPUNPAK(NET, HP) takes a Gaussian Process data structure NET
+% and a hyperparameter vector HP, and returns a Gaussian Process data
+% structure identical to the input model, except that the covariance
+% bias BIAS, output noise NOISE, the input weight vector INWEIGHTS and
+% the vector of covariance function specific parameters FPAR have all
+% been set to the corresponding elements of HP.
+%
+% See also
+% GP, GPPAK, GPFWD, GPERR, GPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'gp');
+if ~isempty(errstring);
+ error(errstring);
+end
+if net.nwts ~= length(hp)
+ error('Invalid weight vector length');
+end
+
+net.bias = hp(1);
+net.noise = hp(2);
+
+% Unpack input weights
+mark1 = 2 + net.nin;
+net.inweights = hp(3:mark1);
+
+% Unpack function specific parameters
+net.fpar = hp(mark1 + 1:size(hp, 2));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gradchek.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gradchek.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function [gradient, delta] = gradchek(w, func, grad, varargin)
+%GRADCHEK Checks a user-defined gradient function using finite differences.
+%
+% Description
+% This function is intended as a utility for other netlab functions
+% (particularly optimisation functions) to use. It enables the user to
+% check whether a gradient calculation has been correctly implemented
+% for a given function. GRADCHEK(W, FUNC, GRAD) checks how accurate the
+% gradient GRAD of a function FUNC is at a parameter vector X. A
+% central difference formula with step size 1.0e-6 is used, and the
+% results for both gradient function and finite difference
+% approximation are printed. The optional return value GRADIENT is the
+% gradient calculated using the function GRAD and the return value
+% DELTA is the difference between the functional and finite difference
+% methods of calculating the gradient.
+%
+% GRADCHEK(X, FUNC, GRAD, P1, P2, ...) allows additional arguments to
+% be passed to FUNC and GRAD.
+%
+% See also
+% CONJGRAD, GRADDESC, HMC, OLGD, QUASINEW, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Reasonable value for step size
+epsilon = 1.0e-6;
+
+func = fcnchk(func, length(varargin));
+grad = fcnchk(grad, length(varargin));
+
+% Treat
+nparams = length(w);
+deltaf = zeros(1, nparams);
+step = zeros(1, nparams);
+for i = 1:nparams
+ % Move a small way in the ith coordinate of w
+ step(i) = 1.0;
+ fplus = feval('linef', epsilon, func, w, step, varargin{:});
+ fminus = feval('linef', -epsilon, func, w, step, varargin{:});
+ % Use central difference formula for approximation
+ deltaf(i) = 0.5*(fplus - fminus)/epsilon;
+ step(i) = 0.0;
+end
+gradient = feval(grad, w, varargin{:});
+fprintf(1, 'Checking gradient ...\n\n');
+delta = gradient - deltaf;
+fprintf(1, ' analytic diffs delta\n\n');
+disp([gradient', deltaf', delta'])
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/graddesc.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/graddesc.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,180 @@
+function [x, options, flog, pointlog] = graddesc(f, x, options, gradf, ...
+ varargin)
+%GRADDESC Gradient descent optimization.
+%
+% Description
+% [X, OPTIONS, FLOG, POINTLOG] = GRADDESC(F, X, OPTIONS, GRADF) uses
+% batch gradient descent to find a local minimum of the function F(X)
+% whose gradient is given by GRADF(X). A log of the function values
+% after each cycle is (optionally) returned in ERRLOG, and a log of the
+% points visited is (optionally) returned in POINTLOG.
+%
+% Note that X is a row vector and F returns a scalar value. The point
+% at which F has a local minimum is returned as X. The function value
+% at that point is returned in OPTIONS(8).
+%
+% GRADDESC(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional
+% arguments to be passed to F() and GRADF().
+%
+% The optional parameters have the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument ERRLOG, and the points visited in the
+% return argument POINTSLOG. If OPTIONS(1) is set to 0, then only
+% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
+% displayed.
+%
+% OPTIONS(2) is the absolute precision required for the value of X at
+% the solution. If the absolute difference between the values of X
+% between two successive steps is less than OPTIONS(2), then this
+% condition is satisfied.
+%
+% OPTIONS(3) is a measure of the precision required of the objective
+% function at the solution. If the absolute difference between the
+% objective function values between two successive steps is less than
+% OPTIONS(3), then this condition is satisfied. Both this and the
+% previous condition must be satisfied for termination.
+%
+% OPTIONS(7) determines the line minimisation method used. If it is
+% set to 1 then a line minimiser is used (in the direction of the
+% negative gradient). If it is 0 (the default), then each parameter
+% update is a fixed multiple (the learning rate) of the negative
+% gradient added to a fixed multiple (the momentum) of the previous
+% parameter update.
+%
+% OPTIONS(9) should be set to 1 to check the user defined gradient
+% function GRADF with GRADCHEK. This is carried out at the initial
+% parameter vector X.
+%
+% OPTIONS(10) returns the total number of function evaluations
+% (including those in any line searches).
+%
+% OPTIONS(11) returns the total number of gradient evaluations.
+%
+% OPTIONS(14) is the maximum number of iterations; default 100.
+%
+% OPTIONS(15) is the precision in parameter space of the line search;
+% default FOPTIONS(2).
+%
+% OPTIONS(17) is the momentum; default 0.5. It should be scaled by the
+% inverse of the number of data points.
+%
+% OPTIONS(18) is the learning rate; default 0.01. It should be scaled
+% by the inverse of the number of data points.
+%
+% See also
+% CONJGRAD, LINEMIN, OLGD, MINBRACK, QUASINEW, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set up the options.
+if length(options) < 18
+ error('Options vector too short')
+end
+
+if (options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+line_min_flag = 0; % Flag for line minimisation option
+if (round(options(7)) == 1)
+ % Use line minimisation
+ line_min_flag = 1;
+ % Set options for line minimiser
+ line_options = foptions;
+ if options(15) > 0
+ line_options(2) = options(15);
+ end
+else
+ % Learning rate: must be positive
+ if (options(18) > 0)
+ eta = options(18);
+ else
+ eta = 0.01;
+ end
+ % Momentum term: allow zero momentum
+ if (options(17) >= 0)
+ mu = options(17);
+ else
+ mu = 0.5;
+ end
+end
+
+% Check function string
+f = fcnchk(f, length(varargin));
+gradf = fcnchk(gradf, length(varargin));
+
+% Display information if options(1) > 0
+display = options(1) > 0;
+
+% Work out if we need to compute f at each iteration.
+% Needed if using line search or if display results or if termination
+% criterion requires it.
+fcneval = (options(7) | display | options(3));
+
+% Check gradients
+if (options(9) > 0)
+ feval('gradchek', x, f, gradf, varargin{:});
+end
+
+dxold = zeros(1, size(x, 2));
+xold = x;
+fold = 0; % Must be initialised so that termination test can be performed
+if fcneval
+ fnew = feval(f, x, varargin{:});
+ options(10) = options(10) + 1;
+ fold = fnew;
+end
+
+% Main optimization loop.
+for j = 1:niters
+ xold = x;
+ grad = feval(gradf, x, varargin{:});
+ options(11) = options(11) + 1; % Increment gradient evaluation counter
+ if (line_min_flag ~= 1)
+ dx = mu*dxold - eta*grad;
+ x = x + dx;
+ dxold = dx;
+ if fcneval
+ fold = fnew;
+ fnew = feval(f, x, varargin{:});
+ options(10) = options(10) + 1;
+ end
+ else
+ sd = - grad./norm(grad); % New search direction.
+ fold = fnew;
+ % Do a line search: normalise search direction to have length 1
+ [lmin, line_options] = feval('linemin', f, x, sd, fold, ...
+ line_options, varargin{:});
+ options(10) = options(10) + line_options(10);
+ x = xold + lmin*sd;
+ fnew = line_options(8);
+ end
+ if nargout >= 3
+ flog(j) = fnew;
+ if nargout >= 4
+ pointlog(j, :) = x;
+ end
+ end
+ if display
+ fprintf(1, 'Cycle %5d Function %11.8f\n', j, fnew);
+ end
+ if (max(abs(x - xold)) < options(2) & abs(fnew - fold) < options(3))
+ % Termination criteria are met
+ options(8) = fnew;
+ return;
+ end
+end
+
+if fcneval
+ options(8) = fnew;
+else
+ options(8) = feval(f, x, varargin{:});
+ options(10) = options(10) + 1;
+end
+if (options(1) >= 0)
+ disp(maxitmess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gsamp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gsamp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,32 @@
+function x = gsamp(mu, covar, nsamp)
+%GSAMP Sample from a Gaussian distribution.
+%
+% Description
+%
+% X = GSAMP(MU, COVAR, NSAMP) generates a sample of size NSAMP from a
+% D-dimensional Gaussian distribution. The Gaussian density has mean
+% vector MU and covariance matrix COVAR, and the matrix X has NSAMP
+% rows in which each row represents a D-dimensional sample vector.
+%
+% See also
+% GAUSS, DEMGAUSS
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+d = size(covar, 1);
+
+mu = reshape(mu, 1, d); % Ensure that mu is a row vector
+
+[evec, eval] = eig(covar);
+
+deig=diag(eval);
+
+if (~isreal(deig)) | any(deig<0),
+ warning('Covariance Matrix is not OK, redefined to be positive definite');
+ eval=abs(eval);
+end
+
+coeffs = randn(nsamp, d)*sqrt(eval);
+
+x = ones(nsamp, 1)*mu + coeffs*evec';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,61 @@
+function net = gtm(dim_latent, nlatent, dim_data, ncentres, rbfunc, ...
+ prior)
+%GTM Create a Generative Topographic Map.
+%
+% Description
+%
+% NET = GTM(DIMLATENT, NLATENT, DIMDATA, NCENTRES, RBFUNC), takes the
+% dimension of the latent space DIMLATENT, the number of data points
+% sampled in the latent space NLATENT, the dimension of the data space
+% DIMDATA, the number of centres in the RBF model NCENTRES, the
+% activation function for the RBF RBFUNC and returns a data structure
+% NET. The parameters in the RBF and GMM sub-models are set by calls to
+% the corresponding creation routines RBF and GMM.
+%
+% The fields in NET are
+% type = 'gtm'
+% nin = dimension of data space
+% dimlatent = dimension of latent space
+% rbfnet = RBF network data structure
+% gmmnet = GMM data structure
+% X = sample of latent points
+%
+% NET = GTM(DIMLATENT, NLATENT, DIMDATA, NCENTRES, RBFUNC, PRIOR),
+% sets a Gaussian zero mean prior on the parameters of the RBF model.
+% PRIOR must be a scalar and represents the inverse variance of the
+% prior distribution. This gives rise to a weight decay term in the
+% error function.
+%
+% See also
+% GTMFWD, GTMPOST, RBF, GMM
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.type = 'gtm';
+% Input to functions is data
+net.nin = dim_data;
+net.dim_latent = dim_latent;
+
+% Default is no regularisation
+if nargin == 5
+ prior = 0.0;
+end
+
+% Only allow scalar prior
+if isstruct(prior) | size(prior) ~= [1 1]
+ error('Prior must be a scalar');
+end
+
+% Create RBF network
+net.rbfnet = rbf(dim_latent, ncentres, dim_data, rbfunc, ...
+ 'linear', prior);
+
+% Mask all but output weights
+net.rbfnet.mask = rbfprior(rbfunc, dim_latent, ncentres, dim_data);
+
+% Create field for GMM output model
+net.gmmnet = gmm(dim_data, nlatent, 'spherical');
+
+% Create empty latent data sample
+net.X = [];
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmem.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmem.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,135 @@
+function [net, options, errlog] = gtmem(net, t, options)
+%GTMEM EM algorithm for Generative Topographic Mapping.
+%
+% Description
+% [NET, OPTIONS, ERRLOG] = GTMEM(NET, T, OPTIONS) uses the Expectation
+% Maximization algorithm to estimate the parameters of a GTM defined by
+% a data structure NET. The matrix T represents the data whose
+% expectation is maximized, with each row corresponding to a vector.
+% It is assumed that the latent data NET.X has been set following a
+% call to GTMINIT, for example. The optional parameters have the
+% following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument ERRLOG. If OPTIONS(1) is set to 0, then
+% only warning messages are displayed. If OPTIONS(1) is -1, then
+% nothing is displayed.
+%
+% OPTIONS(3) is a measure of the absolute precision required of the
+% error function at the solution. If the change in log likelihood
+% between two steps of the EM algorithm is less than this value, then
+% the function terminates.
+%
+% OPTIONS(14) is the maximum number of iterations; default 100.
+%
+% The optional return value OPTIONS contains the final error value
+% (i.e. data log likelihood) in OPTIONS(8).
+%
+% See also
+% GTM, GTMINIT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check that inputs are consistent
+errstring = consist(net, 'gtm', t);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Sort out the options
+if (options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+display = options(1);
+store = 0;
+if (nargout > 2)
+ store = 1; % Store the error values to return them
+ errlog = zeros(1, niters);
+end
+test = 0;
+if options(3) > 0.0
+ test = 1; % Test log likelihood for termination
+end
+
+% Calculate various quantities that remain constant during training
+[ndata, tdim] = size(t);
+ND = ndata*tdim;
+[net.gmmnet.centres, Phi] = rbffwd(net.rbfnet, net.X);
+Phi = [Phi ones(size(net.X, 1), 1)];
+PhiT = Phi';
+[K, Mplus1] = size(Phi);
+
+A = zeros(Mplus1, Mplus1);
+cholDcmp = zeros(Mplus1, Mplus1);
+% Use a sparse representation for the weight regularizing matrix.
+if (net.rbfnet.alpha > 0)
+ Alpha = net.rbfnet.alpha*speye(Mplus1);
+ Alpha(Mplus1, Mplus1) = 0;
+end
+
+for n = 1:niters
+ % Calculate responsibilities
+ [R, act] = gtmpost(net, t);
+ % Calculate error value if needed
+ if (display | store | test)
+ prob = act*(net.gmmnet.priors)';
+ % Error value is negative log likelihood of data
+ e = - sum(log(max(prob,eps)));
+ if store
+ errlog(n) = e;
+ end
+ if display > 0
+ fprintf(1, 'Cycle %4d Error %11.6f\n', n, e);
+ end
+ if test
+ if (n > 1 & abs(e - eold) < options(3))
+ options(8) = e;
+ return;
+ else
+ eold = e;
+ end
+ end
+ end
+
+  % Calculate matrix to be inverted (Phi'*G*Phi + alpha*I in the papers).
+ % Sparse representation of G normally executes faster and saves
+ % memory
+ if (net.rbfnet.alpha > 0)
+ A = full(PhiT*spdiags(sum(R)', 0, K, K)*Phi + ...
+ (Alpha.*net.gmmnet.covars(1)));
+ else
+ A = full(PhiT*spdiags(sum(R)', 0, K, K)*Phi);
+ end
+ % A is a symmetric matrix likely to be positive definite, so try
+ % fast Cholesky decomposition to calculate W, otherwise use SVD.
+ % (PhiT*(R*t)) is computed right-to-left, as R
+ % and t are normally (much) larger than PhiT.
+ [cholDcmp singular] = chol(A);
+ if (singular)
+ if (display)
+ fprintf(1, ...
+ 'gtmem: Warning -- M-Step matrix singular, using pinv.\n');
+ end
+ W = pinv(A)*(PhiT*(R'*t));
+ else
+ W = cholDcmp \ (cholDcmp' \ (PhiT*(R'*t)));
+ end
+ % Put new weights into network to calculate responsibilities
+ % net.rbfnet = netunpak(net.rbfnet, W);
+ net.rbfnet.w2 = W(1:net.rbfnet.nhidden, :);
+ net.rbfnet.b2 = W(net.rbfnet.nhidden+1, :);
+ % Calculate new distances
+ d = dist2(t, Phi*W);
+
+ % Calculate new value for beta
+ net.gmmnet.covars = ones(1, net.gmmnet.ncentres)*(sum(sum(d.*R))/ND);
+end
+
+options(8) = -sum(log(gtmprob(net, t)));
+if (display >= 0)
+ disp(maxitmess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,17 @@
+function mix = gtmfwd(net)
+%GTMFWD Forward propagation through GTM.
+%
+% Description
+% MIX = GTMFWD(NET) takes a GTM structure NET, and forward propagates
+% the latent data sample NET.X through the GTM to generate the
+% structure MIX which represents the Gaussian mixture model in data
+% space.
+%
+% See also
+% GTM
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.gmmnet.centres = rbffwd(net.rbfnet, net.X);
+mix = net.gmmnet;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtminit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtminit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,153 @@
+function net = gtminit(net, options, data, samp_type, varargin)
+%GTMINIT Initialise the weights and latent sample in a GTM.
+%
+% Description
+% NET = GTMINIT(NET, OPTIONS, DATA, SAMPTYPE) takes a GTM NET and
+% generates a sample of latent data points and sets the centres (and
+% widths if appropriate) of NET.RBFNET.
+%
+% If the SAMPTYPE is 'REGULAR', then regular grids of latent data
+% points and RBF centres are created. The dimension of the latent data
+% space must be 1 or 2. For one-dimensional latent space, the
+% LSAMPSIZE parameter gives the number of latent points and the
+% RBFSAMPSIZE parameter gives the number of RBF centres. For a two-
+% dimensional latent space, these parameters must be vectors of length
+% 2 with the number of points in each of the x and y directions to
+% create a rectangular grid. The widths of the RBF basis functions are
+% set by a call to RBFSETFW passing OPTIONS(7) as the scaling
+% parameter.
+%
+% If the SAMPTYPE is 'UNIFORM' or 'GAUSSIAN' then the latent data is
+% found by sampling from a uniform or Gaussian distribution
+% correspondingly. The RBF basis function parameters are set by a call
+% to RBFSETBF with the DATA parameter as dataset and the OPTIONS
+% vector.
+%
+% Finally, the output layer weights of the RBF are initialised by
+% mapping the mean of the latent variable to the mean of the target
+% variable, and the L-dimensional latent variable variance to the
+% variance of the targets along the first L principal components.
+%
+% See also
+% GTM, GTMEM, PCA, RBFSETBF, RBFSETFW
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check for consistency
+errstring = consist(net, 'gtm', data);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Check type of sample
+stypes = {'regular', 'uniform', 'gaussian'};
+if (strcmp(samp_type, stypes)) == 0
+ error('Undefined sample type.')
+end
+
+if net.dim_latent > size(data, 2)
+ error('Latent space dimension must not be greater than data dimension')
+end
+nlatent = net.gmmnet.ncentres;
+nhidden = net.rbfnet.nhidden;
+
+% Create latent data sample and set RBF centres
+
+switch samp_type
+case 'regular'
+ if nargin ~= 6
+ error('Regular type must specify latent and RBF shapes');
+ end
+ l_samp_size = varargin{1};
+ rbf_samp_size = varargin{2};
+ if round(l_samp_size) ~= l_samp_size
+ error('Latent sample specification must contain integers')
+ end
+ % Check existence and size of rbf specification
+ if any(size(rbf_samp_size) ~= [1 net.dim_latent]) | ...
+ prod(rbf_samp_size) ~= nhidden
+ error('Incorrect specification of RBF centres')
+ end
+ % Check dimension and type of latent data specification
+ if any(size(l_samp_size) ~= [1 net.dim_latent]) | ...
+ prod(l_samp_size) ~= nlatent
+ error('Incorrect dimension of latent sample spec.')
+ end
+ if net.dim_latent == 1
+ net.X = [-1:2/(l_samp_size-1):1]';
+ net.rbfnet.c = [-1:2/(rbf_samp_size-1):1]';
+ net.rbfnet = rbfsetfw(net.rbfnet, options(7));
+ elseif net.dim_latent == 2
+ net.X = gtm_rctg(l_samp_size);
+ net.rbfnet.c = gtm_rctg(rbf_samp_size);
+ net.rbfnet = rbfsetfw(net.rbfnet, options(7));
+ else
+ error('For regular sample, input dimension must be 1 or 2.')
+ end
+
+
+case {'uniform', 'gaussian'}
+ if strcmp(samp_type, 'uniform')
+ net.X = 2 * (rand(nlatent, net.dim_latent) - 0.5);
+ else
+ % Sample from N(0, 0.25) distribution to ensure most latent
+ % data is inside square
+ net.X = randn(nlatent, net.dim_latent)/2;
+ end
+ net.rbfnet = rbfsetbf(net.rbfnet, options, net.X);
+otherwise
+ % Shouldn't get here
+ error('Invalid sample type');
+
+end
+
+% Latent data sample and basis function parameters chosen.
+% Now set output weights
+[PCcoeff, PCvec] = pca(data);
+
+% Scale PCs by eigenvalues
+A = PCvec(:, 1:net.dim_latent)*diag(sqrt(PCcoeff(1:net.dim_latent)));
+
+[temp, Phi] = rbffwd(net.rbfnet, net.X);
+% Normalise X to ensure 1:1 mapping of variances and calculate weights
+% as solution of Phi*W = normX*A'
+normX = (net.X - ones(size(net.X))*diag(mean(net.X)))*diag(1./std(net.X));
+net.rbfnet.w2 = Phi \ (normX*A');
+% Bias is mean of target data
+net.rbfnet.b2 = mean(data);
+
+% Must also set initial value of variance
+% Find average distance between nearest centres
+% Ensure that distance of centre to itself is excluded by setting diagonal
+% entries to realmax
+net.gmmnet.centres = rbffwd(net.rbfnet, net.X);
+d = dist2(net.gmmnet.centres, net.gmmnet.centres) + ...
+ diag(ones(net.gmmnet.ncentres, 1)*realmax);
+sigma = mean(min(d))/2;
+
+% Now set covariance to minimum of this and next largest eigenvalue
+if net.dim_latent < size(data, 2)
+ sigma = min(sigma, PCcoeff(net.dim_latent+1));
+end
+net.gmmnet.covars = sigma*ones(1, net.gmmnet.ncentres);
+
+% Sub-function to create the sample data in 2d
+function sample = gtm_rctg(samp_size)
+
+xDim = samp_size(1);
+yDim = samp_size(2);
+% Produce a grid with the right number of rows and columns
+[X, Y] = meshgrid([0:1:(xDim-1)], [(yDim-1):-1:0]);
+
+% Change grid representation
+sample = [X(:), Y(:)];
+
+% Shift grid to correct position and scale it
+maxXY= max(sample);
+sample(:,1) = 2*(sample(:,1) - maxXY(1)/2)./maxXY(1);
+sample(:,2) = 2*(sample(:,2) - maxXY(2)/2)./maxXY(2);
+return;
+
+
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmlmean.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmlmean.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function means = gtmlmean(net, data)
+%GTMLMEAN Mean responsibility for data in a GTM.
+%
+% Description
+% MEANS = GTMLMEAN(NET, DATA) takes a GTM structure NET, and computes
+% the means of the responsibility distributions for each data point in
+% DATA.
+%
+% See also
+% GTM, GTMPOST, GTMLMODE
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check for consistency
+errstring = consist(net, 'gtm', data);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+R = gtmpost(net, data);
+means = R*net.X;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmlmode.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmlmode.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function modes = gtmlmode(net, data)
+%GTMLMODE Mode responsibility for data in a GTM.
+%
+% Description
+% MODES = GTMLMODE(NET, DATA) takes a GTM structure NET, and computes
+% the modes of the responsibility distributions for each data point in
+% DATA. These will always lie at one of the latent space sample points
+% NET.X.
+%
+% See also
+% GTM, GTMPOST, GTMLMEAN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check for consistency
+errstring = consist(net, 'gtm', data);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+R = gtmpost(net, data);
+% Mode is maximum responsibility
+[max_resp, max_index] = max(R, [], 2);
+modes = net.X(max_index, :);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmmag.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmmag.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,27 @@
+function mags = gtmmag(net, latent_data)
+%GTMMAG Magnification factors for a GTM
+%
+% Description
+% MAGS = GTMMAG(NET, LATENTDATA) takes a GTM structure NET, and
+% computes the magnification factors for each point in the latent space
+% contained in LATENTDATA.
+%
+% See also
+% GTM, GTMPOST, GTMLMEAN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'gtm');
+if ~isempty(errstring)
+ error(errstring);
+end
+
+Jacs = rbfjacob(net.rbfnet, latent_data);
+nlatent = size(latent_data, 1);
+mags = zeros(nlatent, 1);
+temp = zeros(net.rbfnet.nin, net.rbfnet.nout);
+for m = 1:nlatent
+ temp = squeeze(Jacs(m, :, :)); % Turn into a 2d matrix
+ mags(m) = sqrt(det(temp*temp'));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmpost.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmpost.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function [post, a] = gtmpost(net, data)
+%GTMPOST Latent space responsibility for data in a GTM.
+%
+% Description
+% POST = GTMPOST(NET, DATA) takes a GTM structure NET, and computes
+% the responsibility at each latent space sample point NET.X for each
+% data point in DATA.
+%
+% [POST, A] = GTMPOST(NET, DATA) also returns the activations A of the
+% GMM NET.GMMNET as computed by GMMPOST.
+%
+% See also
+% GTM, GTMEM, GTMLMEAN, GTMLMODE, GMMPROB
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check for consistency
+errstring = consist(net, 'gtm', data);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+net.gmmnet.centres = rbffwd(net.rbfnet, net.X);
+
+[post, a] = gmmpost(net.gmmnet, data);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/gtmprob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/gtmprob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,22 @@
+function prob = gtmprob(net, data)
+%GTMPROB Probability for data under a GTM.
+%
+% Description
+% PROB = GTMPROB(NET, DATA) takes a GTM structure NET, and computes
+% the probability of each point in the dataset DATA.
+%
+% See also
+% GTM, GTMEM, GTMLMEAN, GTMLMODE, GTMPOST
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check for consistency
+errstring = consist(net, 'gtm', data);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+net.gmmnet.centres = rbffwd(net.rbfnet, net.X);
+
+prob = gmmprob(net.gmmnet, data);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/hbayes.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/hbayes.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function [h, hdata] = hbayes(net, hdata)
+%HBAYES Evaluate Hessian of Bayesian error function for network.
+%
+% Description
+% H = HBAYES(NET, HDATA) takes a network data structure NET together
+% the data contribution to the Hessian for a set of inputs and targets.
+% It returns the regularised Hessian using any zero mean Gaussian
+% priors on the weights defined in NET. In addition, if a MASK is
+% defined in NET, then the entries in H that correspond to weights with
+% a 0 in the mask are removed.
+%
+% [H, HDATA] = HBAYES(NET, HDATA) additionally returns the data
+% component of the Hessian.
+%
+% See also
+% GBAYES, GLMHESS, MLPHESS, RBFHESS
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if (isfield(net, 'mask'))
+ % Extract relevant entries in Hessian
+ nmask_rows = size(find(net.mask), 1);
+ hdata = reshape(hdata(logical(net.mask*(net.mask'))), ...
+ nmask_rows, nmask_rows);
+ nwts = nmask_rows;
+else
+ nwts = net.nwts;
+end
+if isfield(net, 'beta')
+ h = net.beta*hdata;
+else
+ h = hdata;
+end
+
+if isfield(net, 'alpha')
+ if size(net.alpha) == [1 1]
+ h = h + net.alpha*eye(nwts);
+ else
+ if isfield(net, 'mask')
+ nindx_cols = size(net.index, 2);
+ index = reshape(net.index(logical(repmat(net.mask, ...
+ 1, nindx_cols))), nmask_rows, nindx_cols);
+ else
+ index = net.index;
+ end
+ h = h + diag(index*net.alpha);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/hesschek.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/hesschek.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,60 @@
+function h = hesschek(net, x, t)
+%HESSCHEK Use central differences to confirm correct evaluation of Hessian matrix.
+%
+% Description
+%
+% HESSCHEK(NET, X, T) takes a network data structure NET, together with
+% input and target data matrices X and T, and compares the evaluation
+% of the Hessian matrix using the function NETHESS and using central
+% differences with the function NETERR.
+%
+% The optional return value H is the Hessian computed using NETHESS.
+%
+% See also
+% NETHESS, NETERR
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+w0 = netpak(net);
+nwts = length(w0);
+h = nethess(w0, net, x, t);
+
+w = w0;
+hcent = zeros(nwts, nwts);
+h1 = 0.0; h2 = 0.0; h3 = 0.0; h4 = 0.0;
+epsilon = 1.0e-4;
+fprintf(1, 'Checking Hessian ...\n\n');
+for k = 1:nwts;
+ for l = 1:nwts;
+ if(l == k)
+ w(k) = w0(k) + 2.0*epsilon;
+ h1 = neterr(w, net, x, t);
+ w(k) = w0(k) - 2.0*epsilon;
+ h2 = neterr(w, net, x, t);
+ w(k) = w0(k);
+ h3 = neterr(w, net, x, t);
+ hcent(k, k) = (h1 + h2 - 2.0*h3)/(4.0*epsilon^2);
+ else
+ w(k) = w0(k) + epsilon;
+ w(l) = w0(l) + epsilon;
+ h1 = neterr(w, net, x, t);
+ w(k) = w0(k) - epsilon;
+ w(l) = w0(l) - epsilon;
+ h2 = neterr(w, net, x, t);
+ w(k) = w0(k) + epsilon;
+ w(l) = w0(l) - epsilon;
+ h3 = neterr(w, net, x, t);
+ w(k) = w0(k) - epsilon;
+ w(l) = w0(l) + epsilon;
+ h4 = neterr(w, net, x, t);
+ hcent(k, l) = (h1 + h2 - h3 - h4)/(4.0*epsilon^2);
+ w(k) = w0(k);
+ w(l) = w0(l);
+ end
+ end
+end
+
+fprintf(1, ' analytical numerical delta\n\n');
+temp = [h(:), hcent(:), (h(:) - hcent(:))];
+fprintf(1, '%12.6f %12.6f %12.6f\n', temp');
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/hintmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/hintmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
function [xvals, yvals, color] = hintmat(w);
%HINTMAT Evaluates the coordinates of the patches for a Hinton diagram.
%
% Description
% [xvals, yvals, color] = hintmat(w)
% takes a matrix W and returns coordinates XVALS, YVALS for the
% patches comprising the Hinton diagram, together with a vector COLOR
% labelling the color (black or white) of the corresponding elements
% according to their sign.
%
% See also
% HINTON
%

% Copyright (c) Ian T Nabney (1996-2001)

% Set scale to be up to 0.9 of maximum absolute weight value, where scale
% defined so that area of box proportional to weight value.

% Flip so that row 1 of w appears at the top of the diagram.
w = flipud(w);
[nrows, ncols] = size(w);

% Guard against an all-zero matrix: the original normalisation divides by
% max(abs(w)), which would produce NaN coordinates for every patch.
maxabs = max(max(abs(w)));
if maxabs == 0
  scale = zeros(nrows*ncols, 1);
else
  scale = 0.45*sqrt(abs(w)/maxabs);
  scale = scale(:);
end
% Map sign -1/0/+1 to colormap indices (1 = black, 2 = white).
color = 0.5*(sign(w(:)) + 3);

delx = 1;
dely = 1;
% Patch centres sit on a unit grid, offset by half a cell.
[X, Y] = meshgrid(0.5*delx:delx:(ncols-0.5*delx), 0.5*dely:dely:(nrows-0.5*dely));

% Now convert from matrix format to column vector format, and then duplicate
% columns with appropriate offsets determined by normalized weight magnitudes.

xtemp = X(:);
ytemp = Y(:);

% Four corners (anticlockwise) of each square patch, one patch per row.
xvals = [xtemp-delx*scale, xtemp+delx*scale, ...
  xtemp+delx*scale, xtemp-delx*scale];
yvals = [ytemp-dely*scale, ytemp-dely*scale, ...
  ytemp+dely*scale, ytemp+dely*scale];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/hinton.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/hinton.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
function h = hinton(w);
%HINTON Plot Hinton diagram for a weight matrix.
%
% Description
%
% HINTON(W) takes a matrix W and plots the Hinton diagram.
%
% H = HINTON(NET) also returns the figure handle H which can be used,
% for instance, to delete the figure when it is no longer needed.
%
% To print the figure correctly in black and white, you should call
% SET(H, 'INVERTHARDCOPY', 'OFF') before printing.
%
% See also
% DEMHINT, HINTMAT, MLPHINT
%

% Copyright (c) Ian T Nabney (1996-2001)

% Use no more than 640x480 pixels
xmax = 640; ymax = 480;

% Offset bottom left hand corner
x01 = 40; y01 = 40;

% Need to allow 5 pixels border for window frame: but 30 at top
border = 5;
top_border = 30;

ymax = ymax - top_border;
xmax = xmax - border;

% Compute patch coordinates and colours for the diagram.
[xvals, yvals, color] = hintmat(w);
% Try to preserve aspect ratio approximately
if (8*size(w, 1) < 6*size(w, 2))
  delx = xmax; dely = xmax*size(w, 1)/(size(w, 2));
else
  delx = ymax*size(w, 2)/size(w, 1); dely = ymax;
end

% Grey background with a two-entry colormap: 1 = black, 2 = white,
% matching the colour indices returned by hintmat.
h = figure('Color', [0.5 0.5 0.5], ...
  'Name', 'Hinton diagram', ...
  'NumberTitle', 'off', ...
  'Colormap', [0 0 0; 1 1 1], ...
  'Units', 'pixels', ...
  'Position', [x01 y01 delx dely]);
set(gca, 'Visible', 'off', 'Position', [0 0 1 1]);
hold on
patch(xvals', yvals', color', 'Edgecolor', 'none');
axis equal;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/histp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/histp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
function h = histp(x, xmin, xmax, nbins)
%HISTP Histogram estimate of 1-dimensional probability distribution.
%
% Description
%
% HISTP(X, XMIN, XMAX, NBINS) takes a column vector X of data values
% and generates a normalized histogram plot of the distribution. The
% histogram has NBINS bins lying in the range XMIN to XMAX.
%
% H = HISTP(...) returns a vector of patch handles.
%
% See also
% DEMGAUSS
%

% Copyright (c) Ian T Nabney (1996-2001)

ndata = length(x);

% Bin centres passed to HIST.
bins = linspace(xmin, xmax, nbins);

% NOTE(review): linspace spaces the centres (xmax-xmin)/(nbins-1) apart,
% while the normalisation below uses (xmax-xmin)/nbins as the bin width,
% so the plotted density is only approximately normalised — confirm
% whether this discrepancy is intended before relying on exact areas.
binwidth = (xmax - xmin)/nbins;

% Counts per bin, then convert counts to a density estimate.
num = hist(x, bins);

num = num/(ndata*binwidth);

% Draw bars at 0.6 of the bin width; returns the bar handles.
h = bar(bins, num, 0.6);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/hmc.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/hmc.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,281 @@
function [samples, energies, diagn] = hmc(f, x, options, gradf, varargin)
%HMC Hybrid Monte Carlo sampling.
%
% Description
% SAMPLES = HMC(F, X, OPTIONS, GRADF) uses a hybrid Monte Carlo
% algorithm to sample from the distribution P ~ EXP(-F), where F is the
% first argument to HMC. The Markov chain starts at the point X, and
% the function GRADF is the gradient of the `energy' function F.
%
% HMC(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional arguments to
% be passed to F() and GRADF().
%
% [SAMPLES, ENERGIES, DIAGN] = HMC(F, X, OPTIONS, GRADF) also returns a
% log of the energy values (i.e. negative log probabilities) for the
% samples in ENERGIES and DIAGN, a structure containing diagnostic
% information (position, momentum and acceptance threshold) for each
% step of the chain in DIAGN.POS, DIAGN.MOM and DIAGN.ACC respectively.
% All candidate states (including rejected ones) are stored in
% DIAGN.POS.
%
% [SAMPLES, ENERGIES, DIAGN] = HMC(F, X, OPTIONS, GRADF) also returns
% the ENERGIES (i.e. negative log probabilities) corresponding to the
% samples. The DIAGN structure contains three fields:
%
% POS the position vectors of the dynamic process.
%
% MOM the momentum vectors of the dynamic process.
%
% ACC the acceptance thresholds.
%
% S = HMC('STATE') returns a state structure that contains the state of
% the two random number generators RAND and RANDN and the momentum of
% the dynamic process. These are contained in fields randstate,
% randnstate and mom respectively. The momentum state is only used for
% a persistent momentum update.
%
% HMC('STATE', S) resets the state to S. If S is an integer, then it
% is passed to RAND and RANDN and the momentum variable is randomised.
% If S is a structure returned by HMC('STATE') then it resets the
% generator to exactly the same state.
%
% The optional parameters in the OPTIONS vector have the following
% interpretations.
%
% OPTIONS(1) is set to 1 to display the energy values and rejection
% threshold at each step of the Markov chain. If the value is 2, then
% the position vectors at each step are also displayed.
%
% OPTIONS(5) is set to 1 if momentum persistence is used; default 0,
% for complete replacement of momentum variables.
%
% OPTIONS(7) defines the trajectory length (i.e. the number of leap-
% frog steps at each iteration). Minimum value 1.
%
% OPTIONS(9) is set to 1 to check the user defined gradient function.
%
% OPTIONS(14) is the number of samples retained from the Markov chain;
% default 100.
%
% OPTIONS(15) is the number of samples omitted from the start of the
% chain; default 0.
%
% OPTIONS(17) defines the momentum used when a persistent update of
% (leap-frog) momentum is used. This is bounded to the interval [0,
% 1).
%
% OPTIONS(18) is the step size used in leap-frogs; default 1/trajectory
% length.
%
% See also
% METROP
%

% Copyright (c) Ian T Nabney (1996-2001)

% Global variable to store state of momentum variables: set by set_state
% Used to initialise variable if set
global HMC_MOM
if nargin <= 2
  if ~strcmp(f, 'state')
    error('Unknown argument to hmc');
  end
  switch nargin
    case 1
      samples = get_state(f);
      return;
    case 2
      set_state(f, x);
      return;
  end
end

display = options(1);
% BUG FIX: was round(options(5) == 1), which rounds the result of the
% comparison rather than the option value; the parenthesis belongs
% around options(5).
if (round(options(5)) == 1)
  persistence = 1;
  % Set alpha to lie in [0, 1)
  alpha = max(0, options(17));
  alpha = min(1, alpha);
  salpha = sqrt(1-alpha*alpha);
else
  persistence = 0;
end
L = max(1, options(7)); % At least one step in leap-frogging
if options(14) > 0
  nsamples = options(14);
else
  nsamples = 100; % Default
end
if options(15) >= 0
  nomit = options(15);
else
  nomit = 0;
end
if options(18) > 0
  step_size = options(18); % Step size.
else
  step_size = 1/L; % Default
end
x = x(:)'; % Force x to be a row vector
nparams = length(x);

% Set up strings for evaluating potential function and its gradient.
f = fcnchk(f, length(varargin));
gradf = fcnchk(gradf, length(varargin));

% Check the gradient evaluation.
if (options(9))
  % Check gradients
  feval('gradchek', x, f, gradf, varargin{:});
end

samples = zeros(nsamples, nparams); % Matrix of returned samples.
if nargout >= 2
  en_save = 1;
  energies = zeros(nsamples, 1);
else
  en_save = 0;
end
if nargout >= 3
  diagnostics = 1;
  diagn_pos = zeros(nsamples, nparams);
  diagn_mom = zeros(nsamples, nparams);
  diagn_acc = zeros(nsamples, 1);
else
  diagnostics = 0;
end

% n counts retained samples; it is negative during the burn-in phase.
n = - nomit + 1;
Eold = feval(f, x, varargin{:}); % Evaluate starting energy.
nreject = 0;
if (~persistence | isempty(HMC_MOM))
  p = randn(1, nparams); % Initialise momenta at random
else
  p = HMC_MOM; % Initialise momenta from stored state
end
lambda = 1;

% Main loop.
while n <= nsamples

  xold = x; % Store starting position.
  pold = p; % Store starting momenta
  Hold = Eold + 0.5*(p*p'); % Recalculate Hamiltonian as momenta have changed

  if ~persistence
    % Choose a direction at random
    if (rand < 0.5)
      lambda = -1;
    else
      lambda = 1;
    end
  end
  % Perturb step length.
  epsilon = lambda*step_size*(1.0 + 0.1*randn(1));

  % First half-step of leapfrog.
  p = p - 0.5*epsilon*feval(gradf, x, varargin{:});
  x = x + epsilon*p;

  % Full leapfrog steps.
  for m = 1 : L - 1
    p = p - epsilon*feval(gradf, x, varargin{:});
    x = x + epsilon*p;
  end

  % Final half-step of leapfrog.
  p = p - 0.5*epsilon*feval(gradf, x, varargin{:});

  % Now apply Metropolis algorithm.
  Enew = feval(f, x, varargin{:}); % Evaluate new energy.
  p = -p; % Negate momentum
  Hnew = Enew + 0.5*p*p'; % Evaluate new Hamiltonian.
  a = exp(Hold - Hnew); % Acceptance threshold.
  if (diagnostics & n > 0)
    diagn_pos(n,:) = x;
    diagn_mom(n,:) = p;
    diagn_acc(n,:) = a;
  end
  if (display > 1)
    fprintf(1, 'New position is\n');
    disp(x);
  end

  if a > rand(1) % Accept the new state.
    Eold = Enew; % Update energy
    if (display > 0)
      fprintf(1, 'Finished step %4d Threshold: %g\n', n, a);
    end
  else % Reject the new state.
    if n > 0
      nreject = nreject + 1;
    end
    x = xold; % Reset position
    p = pold; % Reset momenta
    if (display > 0)
      fprintf(1, ' Sample rejected %4d. Threshold: %g\n', n, a);
    end
  end
  if n > 0
    samples(n,:) = x; % Store sample.
    if en_save
      energies(n) = Eold; % Store energy.
    end
  end

  % Set momenta for next iteration
  if persistence
    p = -p;
    % Adjust momenta by a small random amount.
    p = alpha.*p + salpha.*randn(1, nparams);
  else
    p = randn(1, nparams); % Replace all momenta.
  end

  n = n + 1;
end

if (display > 0)
  fprintf(1, '\nFraction of samples rejected: %g\n', ...
    nreject/(nsamples));
end
if diagnostics
  diagn.pos = diagn_pos;
  diagn.mom = diagn_mom;
  diagn.acc = diagn_acc;
end
% Store final momentum value in global so that it can be retrieved later
HMC_MOM = p;
return
+
% Package the complete sampler state: the states of both random number
% generators plus the stored leap-frog momentum (held in a global).
function state = get_state(f)

global HMC_MOM
state = struct('randstate', {rand('state')}, ...
  'randnstate', {randn('state')}, ...
  'mom', {HMC_MOM});
return
+
% Restore the complete sampler state from a structure, or reseed both
% generators from an integer (in which case stored momentum is cleared).
function set_state(f, x)

global HMC_MOM
if isnumeric(x)
  % Integer seed: reseed both generators and discard any saved momentum.
  rand('state', x);
  randn('state', x);
  HMC_MOM = [];
  return
end
% Otherwise x must be a state structure as produced by get_state.
if ~isstruct(x)
  error('Second argument to hmc must be number or state structure');
end
if ~isfield(x, 'randstate') | ~isfield(x, 'randnstate') ...
    | ~isfield(x, 'mom')
  error('Second argument to hmc must contain correct fields')
end
rand('state', x.randstate);
randn('state', x.randnstate);
HMC_MOM = x.mom;
return
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/kmeansNetlab.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/kmeansNetlab.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,124 @@
function [centres, options, post, errlog] = kmeans(centres, data, options)
%KMEANS Trains a k means cluster model.
%
% Description
% CENTRES = KMEANS(CENTRES, DATA, OPTIONS) uses the batch K-means
% algorithm to set the centres of a cluster model. The matrix DATA
% represents the data which is being clustered, with each row
% corresponding to a vector. The sum of squares error function is used.
% The point at which a local minimum is achieved is returned as
% CENTRES. The error value at that point is returned in OPTIONS(8).
%
% [CENTRES, OPTIONS, POST, ERRLOG] = KMEANS(CENTRES, DATA, OPTIONS)
% also returns the cluster number (in a one-of-N encoding) for each
% data point in POST and a log of the error values after each cycle in
% ERRLOG. The optional parameters have the following
% interpretations.
%
% OPTIONS(1) is set to 1 to display error values; also logs error
% values in the return argument ERRLOG. If OPTIONS(1) is set to 0, then
% only warning messages are displayed. If OPTIONS(1) is -1, then
% nothing is displayed.
%
% OPTIONS(2) is a measure of the absolute precision required for the
% value of CENTRES at the solution. If the absolute difference between
% the values of CENTRES between two successive steps is less than
% OPTIONS(2), then this condition is satisfied.
%
% OPTIONS(3) is a measure of the precision required of the error
% function at the solution. If the absolute difference between the
% error functions between two successive steps is less than OPTIONS(3),
% then this condition is satisfied. Both this and the previous
% condition must be satisfied for termination.
%
% OPTIONS(14) is the maximum number of iterations; default 100.
%
% See also
% GMMINIT, GMMEM
%

% Copyright (c) Ian T Nabney (1996-2001)

% NOTE(review): the enclosing file is named kmeansNetlab.m (see repo path)
% while the function is named kmeans — presumably renamed to avoid
% shadowing a toolbox function; confirm which name callers use.

[ndata, data_dim] = size(data);
[ncentres, dim] = size(centres);

if dim ~= data_dim
  error('Data dimension does not match dimension of centres')
end

if (ncentres > ndata)
  error('More centres than data')
end

% Sort out the options
if (options(14))
  niters = options(14);
else
  niters = 100;
end

% Only keep an error log if the caller asked for the fourth output.
store = 0;
if (nargout > 3)
  store = 1;
  errlog = zeros(1, niters);
end

% Check if centres and posteriors need to be initialised from data
if (options(5) == 1)
  % Do the initialisation
  perm = randperm(ndata);
  perm = perm(1:ncentres);

  % Assign first ncentres (permuted) data points as centres
  centres = data(perm, :);
end
% Matrix to make unit vectors easy to construct
id = eye(ncentres);

% Main loop of algorithm
for n = 1:niters

  % Save old centres to check for termination
  old_centres = centres;

  % Calculate posteriors based on existing centres
  d2 = dist2(data, centres);
  % Assign each point to nearest centre
  [minvals, index] = min(d2', [], 1);
  % One-of-N encoding: row i selects the row of the identity matrix
  % corresponding to the nearest centre of point i.
  post = id(index,:);

  num_points = sum(post, 1);
  % Adjust the centres based on new posteriors
  for j = 1:ncentres
    % Centres that captured no points are left where they were.
    if (num_points(j) > 0)
      centres(j,:) = sum(data(find(post(:,j)),:), 1)/num_points(j);
    end
  end

  % Error value is total squared distance from cluster centres
  e = sum(minvals);
  if store
    errlog(n) = e;
  end
  if options(1) > 0
    fprintf(1, 'Cycle %4d Error %11.6f\n', n, e);
  end

  if n > 1
    % Test for termination: both the centre movement and the error
    % change must fall below their thresholds.
    % NOTE(review): on this early return ERRLOG keeps its zero padding
    % beyond cycle n — callers should ignore trailing zeros.
    if max(max(abs(centres - old_centres))) < options(2) & ...
        abs(old_e - e) < options(3)
      options(8) = e;
      return;
    end
  end
  old_e = e;
end

% If we get here, then we haven't terminated in the given number of
% iterations.
options(8) = e;
if (options(1) >= 0)
  disp(maxitmess);
end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/knn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/knn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
function net = knn(nin, nout, k, tr_in, tr_targets)
%KNN Creates a K-nearest-neighbour classifier.
%
% Description
% NET = KNN(NIN, NOUT, K, TR_IN, TR_TARGETS) builds a KNN model NET
% with input dimension NIN, output dimension NOUT and K neighbours,
% storing the training inputs and their 1-of-N coded targets inside
% the returned structure.
%
% The fields in NET are
% type = 'knn'
% nin = number of inputs
% nout = number of outputs
% tr_in = training input data
% tr_targets = training target data
%
% See also
% KMEANS, KNNFWD
%

% Copyright (c) Ian T Nabney (1996-2001)


% Build the model skeleton first so consist can validate the supplied
% training data against the declared dimensions.
net = struct('type', 'knn', 'nin', nin, 'nout', nout, 'k', k);

errstring = consist(net, 'knn', tr_in, tr_targets);
if ~isempty(errstring)
  error(errstring);
end

% Attach the (validated) training set to the model.
net.tr_in = tr_in;
net.tr_targets = tr_targets;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/knnfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/knnfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
function [y, l] = knnfwd(net, x)
%KNNFWD Forward propagation through a K-nearest-neighbour classifier.
%
% Description
% [Y, L] = KNNFWD(NET, X) takes a matrix X of input vectors (one vector
% per row) and applies the K-nearest-neighbour rule using the training
% data held in NET, producing a matrix Y of outputs and a matrix L of
% classification labels. Neighbours are found by Euclidean distance.
% Entry (I,J) of Y counts how often an example of class J appears among
% the K training examples closest to test example I. L holds predicted
% class labels as indices 1..N, not in 1-of-N coding.
%
% See also
% KMEANS, KNN
%

% Copyright (c) Ian T Nabney (1996-2001)


errstring = consist(net, 'knn', x);
if ~isempty(errstring)
  error(errstring);
end

ntest = size(x, 1); % Number of input vectors.
nclass = size(net.tr_targets, 2); % Number of classes.

% Squared distances between every training vector and every test vector:
% result has one row per training example, one column per test example.
distsq = dist2(net.tr_in, x);

% Sorting each column ascending gives, in row k of the index matrix,
% the index of the k-th nearest training example for every test point.
[ignored, nearest] = sort(distsq);

% Accumulate the 1-of-N votes of the k nearest neighbours.
y = zeros(ntest, nclass);
for neighbour = 1:net.k
  y = y + net.tr_targets(nearest(neighbour, :), :);
end

if nargout == 2
  % Convert the vote counts to labels, breaking ties at random.
  [ignored, l] = max(y + 0.1*rand(size(y)), [], 2);
end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/linef.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/linef.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
function y = linef(lambda, fn, x, d, varargin)
%LINEF Calculate function value along a line.
%
% Description
% LINEF(LAMBDA, FN, X, D) evaluates the function FN at the point
% X + LAMBDA*D, where X is a row vector and LAMBDA a scalar.
%
% LINEF(LAMBDA, FN, X, D, P1, P2, ...) passes any additional arguments
% through to FN(). Used as a convenience wrapper by some of the
% optimisation routines.
%
% See also
% GRADCHEK, LINEMIN
%

% Copyright (c) Ian T Nabney (1996-2001)

% Normalise the function argument (string or handle) to something callable.
fn = fcnchk(fn, length(varargin));

% Point on the line through x in direction d.
xnew = x + lambda.*d;
y = feval(fn, xnew, varargin{:});
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/linemin.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/linemin.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,152 @@
function [x, options] = linemin(f, pt, dir, fpt, options, ...
  varargin)
%LINEMIN One dimensional minimization.
%
% Description
% [X, OPTIONS] = LINEMIN(F, PT, DIR, FPT, OPTIONS) uses Brent's
% algorithm to find the minimum of the function F(X) along the line DIR
% through the point PT. The function value at the starting point is
% FPT. The point at which F has a local minimum is returned as X. The
% function value at that point is returned in OPTIONS(8).
%
% LINEMIN(F, PT, DIR, FPT, OPTIONS, P1, P2, ...) allows additional
% arguments to be passed to F().
%
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values.
%
% OPTIONS(2) is a measure of the absolute precision required for the
% value of X at the solution.
%
% OPTIONS(3) is a measure of the precision required of the objective
% function at the solution. Both this and the previous condition must
% be satisfied for termination.
%
% OPTIONS(14) is the maximum number of iterations; default 100.
%
% See also
% CONJGRAD, MINBRACK, QUASINEW
%

% Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if(options(14))
  niters = options(14);
else
  niters = 100;
end
options(10) = 0; % Initialise count of function evaluations

display = options(1);

% Check function string
f = fcnchk(f, length(varargin));

% Value of golden section (1 + sqrt(5))/2.0
phi = 1.6180339887499;
cphi = 1 - 1/phi;
TOL = sqrt(eps); % Maximal fractional precision
TINY = 1.0e-10; % Can't use fractional precision when minimum is at 0

% Bracket the minimum: returns three points along the line (parameterised
% by the scalar passed to linef) with the middle one lower than both ends.
[br_min, br_mid, br_max, num_evals] = feval('minbrack', 'linef', ...
  0.0, 1.0, fpt, f, pt, dir, varargin{:});
options(10) = options(10) + num_evals; % Increment number of fn. evals
% No gradient evals in minbrack

% Use Brent's algorithm to find minimum
% Initialise the points and function values
w = br_mid; % Where second from minimum is
v = br_mid; % Previous value of w
x = v; % Where current minimum is
e = 0.0; % Distance moved on step before last
fx = feval('linef', x, f, pt, dir, varargin{:});
options(10) = options(10) + 1;
fv = fx; fw = fx;

for n = 1:niters
  xm = 0.5.*(br_min+br_max); % Middle of bracket
  % Make sure that tolerance is big enough
  tol1 = TOL * (max(abs(x))) + TINY;
  % Decide termination on absolute precision required by options(2)
  if (max(abs(x - xm)) <= options(2) & br_max-br_min < 4*options(2))
    options(8) = fx;
    return;
  end
  % Check if step before last was big enough to try a parabolic step.
  % Note that this will fail on first iteration, which must be a golden
  % section step.
  if (max(abs(e)) > tol1)
    % Construct a trial parabolic fit through x, v and w
    r = (fx - fv) .* (x - w);
    q = (fx - fw) .* (x - v);
    p = (x - v).*q - (x - w).*r;
    q = 2.0 .* (q - r);
    if (q > 0.0) p = -p; end
    q = abs(q);
    % Test if the parabolic fit is OK: it must move less than half the
    % step before last and stay strictly inside the bracket.
    if (abs(p) >= abs(0.5*q*e) | p <= q*(br_min-x) | p >= q*(br_max-x))
      % No it isn't, so take a golden section step into the larger of
      % the two intervals either side of x.
      if (x >= xm)
        e = br_min-x;
      else
        e = br_max-x;
      end
      d = cphi*e;
    else
      % Yes it is, so take the parabolic step
      e = d;
      d = p/q;
      u = x+d;
      % Do not land closer than 2*tol1 to either bracket end.
      if (u-br_min < 2*tol1 | br_max-u < 2*tol1)
        d = sign(xm-x)*tol1;
      end
    end
  else
    % Step before last not big enough, so take a golden section step
    if (x >= xm)
      e = br_min - x;
    else
      e = br_max - x;
    end
    d = cphi*e;
  end
  % Make sure that step is big enough
  if (abs(d) >= tol1)
    u = x+d;
  else
    u = x + sign(d)*tol1;
  end
  % Evaluate function at u
  fu = feval('linef', u, f, pt, dir, varargin{:});
  options(10) = options(10) + 1;
  % Reorganise bracket: shrink it around whichever of u and x is lower,
  % and shuffle the three best points (x, w, v) accordingly.
  if (fu <= fx)
    if (u >= x)
      br_min = x;
    else
      br_max = x;
    end
    v = w; w = x; x = u;
    fv = fw; fw = fx; fx = fu;
  else
    if (u < x)
      br_min = u;
    else
      br_max = u;
    end
    if (fu <= fw | w == x)
      v = w; w = u;
      fv = fw; fw = fu;
    elseif (fu <= fv | v == x | v == w)
      v = u;
      fv = fu;
    end
  end
  if (display == 1)
    fprintf(1, 'Cycle %4d Error %11.6f\n', n, fx);
  end
end
% Iteration limit reached: report the best value found.
options(8) = fx;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/maxitmess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/maxitmess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
function s = maxitmess()
%MAXITMESS Create a standard error message when training reaches max. iterations.
%
% Description
% S = MAXITMESS returns a standard string that it used by training
% algorithms when the maximum number of iterations (as specified in
% OPTIONS(14)) is reached.
%
% See also
% CONJGRAD, GLMTRAIN, GMMEM, GRADDESC, GTMEM, KMEANS, OLGD, QUASINEW, SCG
%

% Copyright (c) Ian T Nabney (1996-2001)

s = 'Maximum number of iterations has been exceeded';
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdn.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdn.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,77 @@
function net = mdn(nin, nhidden, ncentres, dim_target, mix_type, ...
  prior, beta)
%MDN Creates a Mixture Density Network with specified architecture.
%
% Description
% NET = MDN(NIN, NHIDDEN, NCENTRES, DIMTARGET) takes the number of
% inputs, hidden units for a 2-layer feed-forward network and the
% number of centres and target dimension for the mixture model whose
% parameters are set from the outputs of the neural network. The fifth
% argument MIXTYPE is used to define the type of mixture model.
% (Currently there is only one type supported: a mixture of Gaussians
% with a single covariance parameter for each component.) For this
% model, the mixture coefficients are computed from a group of softmax
% outputs, the centres are equal to a group of linear outputs, and the
% variances are obtained by applying the exponential function to a
% third group of outputs.
%
% The network is initialised by a call to MLP, and the arguments PRIOR,
% and BETA have the same role as for that function. Weight
% initialisation uses the Matlab function RANDN and so the seed for
% the random weight initialization can be set using RANDN('STATE', S)
% where S is the seed value. A specialised data structure (rather than
% GMM) is used for the mixture model outputs to improve the efficiency
% of error and gradient calculations in network training. The fields
% are described in MDNFWD where they are set up.
%
% The fields in NET are
%
% type = 'mdn'
% nin = number of input variables
% nout = dimension of target space (not number of network outputs)
% nwts = total number of weights and biases
% mdnmixes = data structure for mixture model output
% mlp = data structure for MLP network
%
% See also
% MDNFWD, MDNERR, MDN2GMM, MDNGRAD, MDNPAK, MDNUNPAK, MLP
%

% Copyright (c) Ian T Nabney (1996-2001)
% David J Evans (1998)

% Currently ignore type argument: reserved for future use
net.type = 'mdn';

% Set up the mixture model part of the structure
% For efficiency we use a specialised data structure in place of GMM
mdnmixes.type = 'mdnmixes';
mdnmixes.ncentres = ncentres;
mdnmixes.dim_target = dim_target;

% This calculation depends on spherical variances:
% ncentres mixing coefficients + ncentres*dim_target centre coordinates
% + ncentres variances.
mdnmixes.nparams = ncentres + ncentres*dim_target + ncentres;

% Make the weights in the mdnmixes structure null
mdnmixes.mixcoeffs = [];
mdnmixes.centres = [];
mdnmixes.covars = [];

% Number of output nodes = number of parameters in mixture model
nout = mdnmixes.nparams;

% Set up the MLP part of the network.
% BUG FIX: the documented four-argument call MDN(NIN, NHIDDEN, NCENTRES,
% DIMTARGET) previously left mlpnet unassigned (the first branch tested
% nargin == 5 only) and crashed below; accept nargin <= 5 since mix_type
% is ignored anyway.
if (nargin <= 5)
  mlpnet = mlp(nin, nhidden, nout, 'linear');
elseif (nargin == 6)
  mlpnet = mlp(nin, nhidden, nout, 'linear', prior);
elseif (nargin == 7)
  mlpnet = mlp(nin, nhidden, nout, 'linear', prior, beta);
end

% Create descriptor
net.mdnmixes = mdnmixes;
net.mlp = mlpnet;
net.nin = nin;
net.nout = dim_target;
net.nwts = mlpnet.nwts;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdn2gmm.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdn2gmm.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,41 @@
function gmmmixes = mdn2gmm(mdnmixes)
%MDN2GMM Converts an MDN mixture data structure to array of GMMs.
%
% Description
% GMMMIXES = MDN2GMM(MDNMIXES) takes an MDN mixture data structure
% MDNMIXES containing three matrices (for priors, centres and
% variances) where each row represents the corresponding parameter
% values for a different mixture model and creates an array of GMMs.
% These can then be used with the standard Netlab Gaussian mixture
% model functions.
%
% See also
% GMM, MDN, MDNFWD
%

% Copyright (c) Ian T Nabney (1996-2001)
% David J Evans (1998)

% Check argument for consistency
errstring = consist(mdnmixes, 'mdnmixes');
if ~isempty(errstring)
  error(errstring);
end

% One mixture model per row of the centres matrix (one per data point).
nmixes = size(mdnmixes.centres, 1);
% Construct ndata structures containing the mixture model information.
% First allocate the memory: build a template GMM to obtain the field
% names, then preallocate a 1-by-nmixes struct array with those fields
% via cell2struct (fields along dimension 1 of the cell array).
tempmix = gmm(mdnmixes.dim_target, mdnmixes.ncentres, 'spherical');
f = fieldnames(tempmix);
gmmmixes = cell(size(f, 1), 1, nmixes);
gmmmixes = cell2struct(gmmmixes, f,1);

% Then fill each structure in turn using gmmunpak. Assume that spherical
% covariance structure is used.
for i = 1:nmixes
  % Row i stores centres with target dimension varying fastest; reshape
  % and transpose to the (ncentres x dim_target) layout gmmunpak expects.
  centres = reshape(mdnmixes.centres(i, :), mdnmixes.dim_target, ...
    mdnmixes.ncentres)';
  gmmmixes(i) = gmmunpak(tempmix, [mdnmixes.mixcoeffs(i,:), ...
    centres(:)', mdnmixes.covars(i,:)]);
end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdndist2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdndist2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
function n2 = mdndist2(mixparams, t)
%MDNDIST2 Calculates squared distance between centres of Gaussian kernels and data
%
% Description
% N2 = MDNDIST2(MIXPARAMS, T) takes the Gaussian centres stored in
% MIXPARAMS and the target data matrix T, and computes the squared
% Euclidean distance between them. If T has M rows and N columns, the
% CENTRES field in MIXPARAMS should have M rows and N*MIXPARAMS.NCENTRES
% columns: the centres in each row relate to the corresponding row in T.
% The result has M rows and MIXPARAMS.NCENTRES columns; its (I,J)th
% entry is the squared distance from the Ith row of T to the Jth centre
% in the Ith row of MIXPARAMS.CENTRES.
%
% See also
% MDNFWD, MDNPROB
%

% Copyright (c) Ian T Nabney (1996-2001)
% David J Evans (1998)

% Check arguments for consistency
errstring = consist(mixparams, 'mdnmixes');
if ~isempty(errstring)
  error(errstring);
end

ncentres = mixparams.ncentres;
dim_target = mixparams.dim_target;
ntarget = size(t, 1);
if ntarget ~= size(mixparams.centres, 1)
  error('Number of targets does not match number of mixtures')
end
if size(t, 2) ~= mixparams.dim_target
  error('Target dimension does not match mixture dimension')
end

% Tile the targets horizontally so each target row lines up with all of
% its kernel centres in the centres matrix.
tiled = repmat(t, 1, ncentres);

% Element-wise squared differences against the centres.
sqdiff = (tiled - mixparams.centres).^2;

% Gather the dim_target components of each (target, centre) pair into a
% row and sum them, giving one squared distance per pair.
sqdiff = reshape(sqdiff', dim_target, (ntarget*ncentres))';
pairdist = sum(sqdiff, 2);

% Arrange as one row per target, one column per centre.
n2 = reshape(pairdist, ncentres, ntarget)';
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnerr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdnerr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
function e = mdnerr(net, x, t)
%MDNERR Evaluate error function for Mixture Density Network.
%
% Description
% E = MDNERR(NET, X, T) takes a mixture density network data structure
% NET, a matrix X of input vectors and a matrix T of target vectors,
% and evaluates the error function E: the negative log likelihood of
% the targets under the conditional density defined by the mixture
% model whose parameters the MLP produces. Each row of X is one input
% vector and each row of T the corresponding target vector.
%
% See also
% MDN, MDNFWD, MDNGRAD
%

% Copyright (c) Ian T Nabney (1996-2001)
% David J Evans (1998)

% Check arguments for consistency
errstring = consist(net, 'mdn', x, t);
if ~isempty(errstring)
  error(errstring);
end

% Forward propagate to get one mixture model per input pattern.
mixparams = mdnfwd(net, x);

% Per-pattern, per-kernel probabilities of the targets.
probs = mdnprob(mixparams, t);

% Negative log likelihood; the eps floor guards against log(0).
e = -sum(log(max(eps, sum(probs, 2))));
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdnfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,93 @@
+function [mixparams, y, z, a] = mdnfwd(net, x)
+%MDNFWD Forward propagation through Mixture Density Network.
+%
+% Description
+% MIXPARAMS = MDNFWD(NET, X) takes a mixture density network data
+% structure NET and a matrix X of input vectors, and forward propagates
+% the inputs through the network to generate a structure MIXPARAMS
+% which contains the parameters of several mixture models. Each row
+% of X represents one input vector and the corresponding row of the
+% matrices in MIXPARAMS represents the parameters of a mixture model
+% for the conditional probability of target vectors given the input
+% vector. This is not represented as an array of GMM structures to
+% improve the efficiency of MDN training.
+%
+% The fields in MIXPARAMS are
+% type = 'mdnmixes'
+% ncentres = number of mixture components
+% dimtarget = dimension of target space
+% mixcoeffs = mixing coefficients
+% centres = means of Gaussians: stored as one row per pattern
+% covars = covariances of Gaussians
+% nparams = number of parameters
+%
+% [MIXPARAMS, Y, Z] = MDNFWD(NET, X) also generates a matrix Y of the
+% outputs of the MLP and a matrix Z of the hidden unit activations
+% where each row corresponds to one pattern.
+%
+% [MIXPARAMS, Y, Z, A] = MDNFWD(NET, X) also returns a matrix A giving
+% the summed inputs to each output unit, where each row corresponds to
+% one pattern.
+%
+% See also
+% MDN, MDN2GMM, MDNERR, MDNGRAD, MLPFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+% Check arguments for consistency
+errstring = consist(net, 'mdn', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Extract mlp and mixture model descriptors
+mlpnet = net.mlp;
+mixes = net.mdnmixes;
+
+ncentres = mixes.ncentres; % Number of components in mixture model
+dim_target = mixes.dim_target; % Dimension of targets
+nparams = mixes.nparams; % Number of parameters in mixture model
+
+% Propagate forwards through MLP
+[y, z, a] = mlpfwd(mlpnet, x);
+
+% Compute the position for each parameter in the whole
+% matrix. Used to define the mixparams structure
+mixcoeff = [1:1:ncentres];
+centres = [ncentres+1:1:(ncentres*(1+dim_target))];
+variances = [(ncentres*(1+dim_target)+1):1:nparams];
+
+% Convert output values into mixture model parameters
+
+% Use softmax to calculate priors
+% Prevent overflow and underflow: use same bounds as glmfwd
+% Ensure that sum(exp(y), 2) does not overflow
+maxcut = log(realmax) - log(ncentres);
+% Ensure that exp(y) > 0
+mincut = log(realmin);
+temp = min(y(:,1:ncentres), maxcut);
+temp = max(temp, mincut);
+temp = exp(temp);
+mixpriors = temp./(sum(temp, 2)*ones(1,ncentres));
+
+% Centres are just copies of network outputs
+mixcentres = y(:,(ncentres+1):ncentres*(1+dim_target));
+
+% Variances are exp of network outputs
+mixwidths = exp(y(:,(ncentres*(1+dim_target)+1):nparams));
+
+% Now build up all the mixture model weight vectors
+ndata = size(x, 1);
+
+% Return parameters
+mixparams.type = mixes.type;
+mixparams.ncentres = mixes.ncentres;
+mixparams.dim_target = mixes.dim_target;
+mixparams.nparams = mixes.nparams;
+
+mixparams.mixcoeffs = mixpriors;
+mixparams.centres = mixcentres;
+mixparams.covars = mixwidths;
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdngrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdngrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,66 @@
+function g = mdngrad(net, x, t)
+%MDNGRAD Evaluate gradient of error function for Mixture Density Network.
+%
+% Description
+% G = MDNGRAD(NET, X, T) takes a mixture density network data
+% structure NET, a matrix X of input vectors and a matrix T of target
+% vectors, and evaluates the gradient G of the error function with
+% respect to the network weights. The error function is negative log
+% likelihood of the target data. Each row of X corresponds to one
+% input vector and each row of T corresponds to one target vector.
+%
+% See also
+% MDN, MDNFWD, MDNERR, MDNPROB, MLPBKP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+% Check arguments for consistency
+errstring = consist(net, 'mdn', x, t);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+[mixparams, y, z] = mdnfwd(net, x);
+
+% Compute gradients at MLP outputs: put the answer in deltas
+ncentres = net.mdnmixes.ncentres;
+dim_target = net.mdnmixes.dim_target;
+nmixparams = net.mdnmixes.nparams;
+ntarget = size(t, 1);
+deltas = zeros(ntarget, net.mlp.nout);
+e = ones(ncentres, 1);
+f = ones(1, dim_target);
+
+post = mdnpost(mixparams, t);
+
+% Calculate prior derivatives
+deltas(:,1:ncentres) = mixparams.mixcoeffs - post;
+
+% Calculate centre derivatives
+long_t = kron(ones(1, ncentres), t);
+centre_err = mixparams.centres - long_t;
+
+% Get the post to match each u_jk:
+% this array will be (ntarget, (ncentres*dim_target))
+long_post = kron(ones(dim_target, 1), post);
+long_post = reshape(long_post, ntarget, (ncentres*dim_target));
+
+% Get the variance to match each u_jk:
+var = mixparams.covars;
+var = kron(ones(dim_target, 1), var);
+var = reshape(var, ntarget, (ncentres*dim_target));
+
+% Compute centre deltas
+deltas(:, (ncentres+1):(ncentres*(1+dim_target))) = ...
+ (centre_err.*long_post)./var;
+
+% Compute variance deltas
+dist2 = mdndist2(mixparams, t);
+c = dim_target*ones(ntarget, ncentres);
+deltas(:, (ncentres*(1+dim_target)+1):nmixparams) = ...
+ post.*((dist2./mixparams.covars)-c)./(-2);
+
+% Now back-propagate deltas through MLP
+g = mlpbkp(net.mlp, x, z, deltas);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdninit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdninit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function net = mdninit(net, prior, t, options)
+%MDNINIT Initialise the weights in a Mixture Density Network.
+%
+% Description
+%
+% NET = MDNINIT(NET, PRIOR) takes a Mixture Density Network NET and
+% sets the weights and biases by sampling from a Gaussian distribution.
+% It calls MLPINIT for the MLP component of NET.
+%
+% NET = MDNINIT(NET, PRIOR, T, OPTIONS) uses the target data T to
+% initialise the biases for the output units after initialising the
+% other weights as above. It calls GMMINIT, with T and OPTIONS as
+% arguments, to obtain a model of the unconditional density of T. The
+% biases are then set so that NET will output the values in the
+% Gaussian mixture model.
+%
+% See also
+% MDN, MLP, MLPINIT, GMMINIT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+% Initialise network weights from prior: this gives noise around values
+% determined later
+net.mlp = mlpinit(net.mlp, prior);
+
+if nargin > 2
+ % Initialise priors, centres and variances from target data
+ temp_mix = gmm(net.mdnmixes.dim_target, net.mdnmixes.ncentres, 'spherical');
+ temp_mix = gmminit(temp_mix, t, options);
+
+ ncentres = net.mdnmixes.ncentres;
+ dim_target = net.mdnmixes.dim_target;
+
+ % Now set parameters in MLP to yield the right values.
+ % This involves setting the biases correctly.
+
+ % Priors
+ net.mlp.b2(1:ncentres) = temp_mix.priors;
+
+ % Centres are arranged in mlp such that we have
+ % u11, u12, u13, ..., u1c, ... , uj1, uj2, uj3, ..., ujc, ..., um1, uM2,
+ % ..., uMc
+ % This is achieved by transposing temp_mix.centres before reshaping
+ end_centres = ncentres*(dim_target+1);
+ net.mlp.b2(ncentres+1:end_centres) = ...
+ reshape(temp_mix.centres', 1, ncentres*dim_target);
+
+ % Variances
+ net.mlp.b2((end_centres+1):net.mlp.nout) = ...
+ log(temp_mix.covars);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnnet.mat
Binary file toolboxes/FullBNT-1.0.7/netlab3.3/mdnnet.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdnpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function w = mdnpak(net)
+%MDNPAK Combines weights and biases into one weights vector.
+%
+% Description
+% W = MDNPAK(NET) takes a mixture density network data structure NET
+% and combines the network weights into a single row vector W.
+%
+% See also
+% MDN, MDNUNPAK, MDNFWD, MDNERR, MDNGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+errstring = consist(net, 'mdn');
+if ~isempty(errstring)
+ error(errstring);
+end
+w = mlppak(net.mlp);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnpost.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdnpost.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,26 @@
+function [post, a] = mdnpost(mixparams, t)
+%MDNPOST Computes the posterior probability for each MDN mixture component.
+%
+% Description
+% POST = MDNPOST(MIXPARAMS, T) computes the posterior probability
+% P(J|T) of each data vector in T under the Gaussian mixture model
+% represented by the corresponding entries in MIXPARAMS. Each row of T
+% represents a single vector.
+%
+% [POST, A] = MDNPOST(MIXPARAMS, T) also computes the activations A
+% (i.e. the probability P(T|J) of the data conditioned on each
+% component density) for a Gaussian mixture model.
+%
+% See also
+% MDNGRAD, MDNPROB
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+[prob a] = mdnprob(mixparams, t);
+
+s = sum(prob, 2);
+% Set any zeros to one before dividing
+s = s + (s==0);
+post = prob./(s*ones(1, mixparams.ncentres));
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnprob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdnprob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [prob,a] = mdnprob(mixparams, t)
+%MDNPROB Computes the data probability likelihood for an MDN mixture structure.
+%
+% Description
+% PROB = MDNPROB(MIXPARAMS, T) computes the probability P(T) of each
+% data vector in T under the Gaussian mixture model represented by the
+% corresponding entries in MIXPARAMS. Each row of T represents a single
+% vector.
+%
+% [PROB, A] = MDNPROB(MIXPARAMS, T) also computes the activations A
+% (i.e. the probability P(T|J) of the data conditioned on each
+% component density) for a Gaussian mixture model.
+%
+% See also
+% MDNERR, MDNPOST
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+% Check arguments for consistency
+errstring = consist(mixparams, 'mdnmixes');
+if ~isempty(errstring)
+ error(errstring);
+end
+
+ntarget = size(t, 1);
+if ntarget ~= size(mixparams.centres, 1)
+ error('Number of targets does not match number of mixtures')
+end
+if size(t, 2) ~= mixparams.dim_target
+ error('Target dimension does not match mixture dimension')
+end
+
+dim_target = mixparams.dim_target;
+ntarget = size(t, 1);
+
+% Calculate squared norm matrix, of dimension (ndata, ncentres)
+% vector (ntarget * ncentres)
+dist2 = mdndist2(mixparams, t);
+
+% Calculate variance factors
+variance = 2.*mixparams.covars;
+
+% Compute the normalisation term
+normal = ((2.*pi).*mixparams.covars).^(dim_target./2);
+
+% Now compute the activations
+a = exp(-(dist2./variance))./normal;
+
+% Weight each component density by its mixing coefficient
+prob = mixparams.mixcoeffs.*a;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mdnunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mdnunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function net = mdnunpak(net, w)
+%MDNUNPAK Separates weights vector into weight and bias matrices.
+%
+% Description
+% NET = MDNUNPAK(NET, W) takes an mdn network data structure NET and a
+% weight vector W, and returns a network data structure identical to
+% the input network, except that the weights in the MLP sub-structure
+% are set to the corresponding elements of W.
+%
+% See also
+% MDN, MDNPAK, MDNFWD, MDNERR, MDNGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+% David J Evans (1998)
+
+errstring = consist(net, 'mdn');
+if ~isempty(errstring)
+ error(errstring);
+end
+if net.nwts ~= length(w)
+ error('Invalid weight vector length')
+end
+
+net.mlp = mlpunpak(net.mlp, w);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/metrop.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/metrop.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,193 @@
+function [samples, energies, diagn] = metrop(f, x, options, gradf, varargin)
+%METROP Markov Chain Monte Carlo sampling with Metropolis algorithm.
+%
+% Description
+% SAMPLES = METROP(F, X, OPTIONS) uses the Metropolis algorithm to
+% sample from the distribution P ~ EXP(-F), where F is the first
+% argument to METROP. The Markov chain starts at the point X and each
+% candidate state is picked from a Gaussian proposal distribution and
+% accepted or rejected according to the Metropolis criterion.
+%
+% SAMPLES = METROP(F, X, OPTIONS, [], P1, P2, ...) allows additional
+% arguments to be passed to F(). The fourth argument is ignored, but
+% is included for compatibility with HMC and the optimisers.
+%
+% [SAMPLES, ENERGIES, DIAGN] = METROP(F, X, OPTIONS) also returns a log
+% of the energy values (i.e. negative log probabilities) for the
+% samples in ENERGIES and DIAGN, a structure containing diagnostic
+% information (position and acceptance threshold) for each step of the
+% chain in DIAGN.POS and DIAGN.ACC respectively. All candidate states
+% (including rejected ones) are stored in DIAGN.POS.
+%
+% S = METROP('STATE') returns a state structure that contains the state
+% of the two random number generators RAND and RANDN. These are
+% contained in fields randstate, randnstate.
+%
+% METROP('STATE', S) resets the state to S. If S is an integer, then
+% it is passed to RAND and RANDN. If S is a structure returned by
+% METROP('STATE') then it resets the generator to exactly the same
+% state.
+%
+% The optional parameters in the OPTIONS vector have the following
+% interpretations.
+%
+% OPTIONS(1) is set to 1 to display the energy values and rejection
+% threshold at each step of the Markov chain. If the value is 2, then
+% the position vectors at each step are also displayed.
+%
+% OPTIONS(14) is the number of samples retained from the Markov chain;
+% default 100.
+%
+% OPTIONS(15) is the number of samples omitted from the start of the
+% chain; default 0.
+%
+% OPTIONS(18) is the variance of the proposal distribution; default 1.
+%
+% See also
+% HMC
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if nargin <= 2
+ if ~strcmp(f, 'state')
+ error('Unknown argument to metrop');
+ end
+ switch nargin
+ case 1
+ % Return state of sampler
+ samples = get_state(f); % Function defined in this module
+ return;
+ case 2
+ % Set the state of the sampler
+ set_state(f, x); % Function defined in this module
+ return;
+ end
+end
+
+if 0
+seed = 42;
+randn('state', seed);
+rand('state', seed)
+end
+
+display = options(1);
+if options(14) > 0
+ nsamples = options(14);
+else
+ nsamples = 100;
+end
+if options(15) >= 0
+ nomit = options(15);
+else
+ nomit = 0;
+end
+if options(18) > 0.0
+ std_dev = sqrt(options(18));
+else
+ std_dev = 1.0; % default
+end
+nparams = length(x);
+
+% Set up string for evaluating potential function.
+f = fcnchk(f, length(varargin));
+
+samples = zeros(nsamples, nparams); % Matrix of returned samples.
+if nargout >= 2
+ en_save = 1;
+ energies = zeros(nsamples, 1);
+else
+ en_save = 0;
+end
+if nargout >= 3
+ diagnostics = 1;
+ diagn_pos = zeros(nsamples, nparams);
+ diagn_acc = zeros(nsamples, 1);
+else
+ diagnostics = 0;
+end
+
+% Main loop.
+n = - nomit + 1;
+Eold = feval(f, x, varargin{:}); % Evaluate starting energy.
+nreject = 0; % Initialise count of rejected states.
+while n <= nsamples
+
+ xold = x;
+ % Sample a new point from the proposal distribution
+ x = xold + randn(1, nparams)*std_dev;
+ %fprintf('netlab propose: xold = %5.3f,%5.3f, xnew = %5.3f,%5.3f\n',...
+ % xold(1), xold(2), x(1), x(2));
+
+ % Now apply Metropolis algorithm.
+ Enew = feval(f, x, varargin{:}); % Evaluate new energy.
+ a = exp(Eold - Enew); % Acceptance threshold.
+ if (diagnostics & n > 0)
+ diagn_pos(n,:) = x;
+ diagn_acc(n,:) = a;
+ end
+ if (display > 1)
+ fprintf(1, 'New position is\n');
+ disp(x);
+ end
+
+ r = rand(1);
+ %fprintf('netlab: n=%d, a=%f/%f=%5.3f (%5.3f), r=%5.3f\n',...
+ % n, exp(-Enew), exp(-Eold), a, exp(-Enew)/exp(-Eold), r);
+ if a > r % Accept the new state.
+ Eold = Enew;
+ if (display > 0)
+ fprintf(1, 'Finished step %4d Threshold: %g\n', n, a);
+ end
+ else % Reject the new state
+ if n > 0
+ nreject = nreject + 1;
+ end
+ x = xold; % Reset position
+ if (display > 0)
+ fprintf(1, ' Sample rejected %4d. Threshold: %g\n', n, a);
+ end
+ end
+ if n > 0
+ samples(n,:) = x; % Store sample.
+ if en_save
+ energies(n) = Eold; % Store energy.
+ end
+ end
+ n = n + 1;
+end
+
+if (display > 0)
+ fprintf(1, '\nFraction of samples rejected: %g\n', ...
+ nreject/(nsamples));
+end
+
+if diagnostics
+ diagn.pos = diagn_pos;
+ diagn.acc = diagn_acc;
+end
+
+% Return complete state of the sampler.
+function state = get_state(f)
+
+state.randstate = rand('state');
+state.randnstate = randn('state');
+return
+
+% Set state of sampler, either from full state, or with an integer
+function set_state(f, x)
+
+if isnumeric(x)
+ rand('state', x);
+ randn('state', x);
+else
+ if ~isstruct(x)
+ error('Second argument to metrop must be number or state structure');
+ end
+ if (~isfield(x, 'randstate') | ~isfield(x, 'randnstate'))
+ error('Second argument to metrop must contain correct fields')
+ end
+ rand('state', x.randstate);
+ randn('state', x.randnstate);
+end
+return
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/minbrack.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/minbrack.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,127 @@
+function [br_min, br_mid, br_max, num_evals] = minbrack(f, a, b, fa, ...
+ varargin)
+%MINBRACK Bracket a minimum of a function of one variable.
+%
+% Description
+% [BRMIN, BRMID, BRMAX, NUMEVALS] = MINBRACK(F, A, B, FA) finds a
+% bracket of three points around a local minimum of F. The function F
+% must have a one dimensional domain. A < B is an initial guess at the
+% minimum and maximum points of a bracket, but MINBRACK will search
+% outside this interval if necessary. The bracket consists of three
+% points (in increasing order) such that F(BRMID) < F(BRMIN) and
+% F(BRMID) < F(BRMAX). FA is the value of the function at A: it is
+% included to avoid unnecessary function evaluations in the
+% optimization routines. The return value NUMEVALS is the number of
+% function evaluations in MINBRACK.
+%
+% MINBRACK(F, A, B, FA, P1, P2, ...) allows additional arguments to be
+% passed to F
+%
+% See also
+% LINEMIN, LINEF
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check function string
+f = fcnchk(f, length(varargin));
+
+% Value of golden section (1 + sqrt(5))/2.0
+phi = 1.6180339887499;
+
+% Initialise count of number of function evaluations
+num_evals = 0;
+
+% A small non-zero number to avoid dividing by zero in quadratic interpolation
+TINY = 1.e-10;
+
+% Maximal proportional step to take: don't want to make this too big
+% as then spend a lot of time finding the minimum inside the bracket
+max_step = 10.0;
+
+fb = feval(f, b, varargin{:});
+num_evals = num_evals + 1;
+
+% Assume that we know going from a to b is downhill initially
+% (usually because gradf(a) < 0).
+if (fb > fa)
+ % Minimum must lie between a and b: do golden section until we find point
+ % low enough to be middle of bracket
+ c = b;
+ b = a + (c-a)/phi;
+ fb = feval(f, b, varargin{:});
+ num_evals = num_evals + 1;
+ while (fb > fa)
+ c = b;
+ b = a + (c-a)/phi;
+ fb = feval(f, b, varargin{:});
+ num_evals = num_evals + 1;
+ end
+else
+ % There is a valid bracket upper bound greater than b
+ c = b + phi*(b-a);
+ fc = feval(f, c, varargin{:});
+ num_evals = num_evals + 1;
+ bracket_found = 0;
+
+ while (fb > fc)
+ % Do a quadratic interpolation (i.e. to minimum of quadratic)
+ r = (b-a).*(fb-fc);
+ q = (b-c).*(fb-fa);
+ u = b - ((b-c)*q - (b-a)*r)/(2.0*(sign(q-r)*max([abs(q-r), TINY])));
+ ulimit = b + max_step*(c-b);
+
+ if ((b-u)'*(u-c) > 0.0)
+ % Interpolant lies between b and c
+ fu = feval(f, u, varargin{:});
+ num_evals = num_evals + 1;
+ if (fu < fc)
+ % Have a minimum between b and c
+ br_min = b;
+ br_mid = u;
+ br_max = c;
+ return;
+ elseif (fu > fb)
+ % Have a minimum between a and u
+ br_min = a;
+ br_mid = c;
+ br_max = u;
+ return;
+ end
+ % Quadratic interpolation didn't give a bracket, so take a golden step
+ u = c + phi*(c-b);
+ elseif ((c-u)'*(u-ulimit) > 0.0)
+ % Interpolant lies between c and limit
+ fu = feval(f, u, varargin{:});
+ num_evals = num_evals + 1;
+ if (fu < fc)
+ % Move bracket along, and then take a golden section step
+ b = c;
+ c = u;
+ u = c + phi*(c-b);
+ else
+ bracket_found = 1;
+ end
+ elseif ((u-ulimit)'*(ulimit-c) >= 0.0)
+ % Limit parabolic u to maximum value
+ u = ulimit;
+ else
+ % Reject parabolic u and use golden section step
+ u = c + phi*(c-b);
+ end
+ if ~bracket_found
+ fu = feval(f, u, varargin{:});
+ num_evals = num_evals + 1;
+ end
+ a = b; b = c; c = u;
+ fa = fb; fb = fc; fc = fu;
+ end % while loop
+end % bracket found
+br_mid = b;
+if (a < c)
+ br_min = a;
+ br_max = c;
+else
+ br_min = c;
+ br_max = a;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,92 @@
+function net = mlp(nin, nhidden, nout, outfunc, prior, beta)
+%MLP Create a 2-layer feedforward network.
+%
+% Description
+% NET = MLP(NIN, NHIDDEN, NOUT, FUNC) takes the number of inputs,
+% hidden units and output units for a 2-layer feed-forward network,
+% together with a string FUNC which specifies the output unit
+% activation function, and returns a data structure NET. The weights
+% are drawn from a zero mean, unit variance isotropic Gaussian, with
+% varianced scaled by the fan-in of the hidden or output units as
+% appropriate. This makes use of the Matlab function RANDN and so the
+% seed for the random weight initialization can be set using
+% RANDN('STATE', S) where S is the seed value. The hidden units use
+% the TANH activation function.
+%
+% The fields in NET are
+% type = 'mlp'
+% nin = number of inputs
+% nhidden = number of hidden units
+% nout = number of outputs
+% nwts = total number of weights and biases
+% actfn = string describing the output unit activation function:
+% 'linear'
+% 'logistic'
+% 'softmax'
+% w1 = first-layer weight matrix
+% b1 = first-layer bias vector
+% w2 = second-layer weight matrix
+% b2 = second-layer bias vector
+% Here W1 has dimensions NIN times NHIDDEN, B1 has dimensions 1 times
+% NHIDDEN, W2 has dimensions NHIDDEN times NOUT, and B2 has dimensions
+% 1 times NOUT.
+%
+% NET = MLP(NIN, NHIDDEN, NOUT, FUNC, PRIOR), in which PRIOR is a
+% scalar, allows the field NET.ALPHA in the data structure NET to be
+% set, corresponding to a zero-mean isotropic Gaussian prior with
+% inverse variance with value PRIOR. Alternatively, PRIOR can consist
+% of a data structure with fields ALPHA and INDEX, allowing individual
+% Gaussian priors to be set over groups of weights in the network. Here
+% ALPHA is a column vector in which each element corresponds to a
+% separate group of weights, which need not be mutually exclusive. The
+% membership of the groups is defined by the matrix INDX in which the
+% columns correspond to the elements of ALPHA. Each column has one
+% element for each weight in the matrix, in the order defined by the
+% function MLPPAK, and each element is 1 or 0 according to whether the
+% weight is a member of the corresponding group or not. A utility
+% function MLPPRIOR is provided to help in setting up the PRIOR data
+% structure.
+%
+% NET = MLP(NIN, NHIDDEN, NOUT, FUNC, PRIOR, BETA) also sets the
+% additional field NET.BETA in the data structure NET, where beta
+% corresponds to the inverse noise variance.
+%
+% See also
+% MLPPRIOR, MLPPAK, MLPUNPAK, MLPFWD, MLPERR, MLPBKP, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.type = 'mlp';
+net.nin = nin;
+net.nhidden = nhidden;
+net.nout = nout;
+net.nwts = (nin + 1)*nhidden + (nhidden + 1)*nout;
+
+outfns = {'linear', 'logistic', 'softmax'};
+
+if sum(strcmp(outfunc, outfns)) == 0
+ error('Undefined output function. Exiting.');
+else
+ net.outfn = outfunc;
+end
+
+if nargin > 4
+ if isstruct(prior)
+ net.alpha = prior.alpha;
+ net.index = prior.index;
+ elseif size(prior) == [1 1]
+ net.alpha = prior;
+ else
+ error('prior must be a scalar or a structure');
+ end
+end
+
+net.w1 = randn(nin, nhidden)/sqrt(nin + 1);
+net.b1 = randn(1, nhidden)/sqrt(nin + 1);
+net.w2 = randn(nhidden, nout)/sqrt(nhidden + 1);
+net.b2 = randn(1, nout)/sqrt(nhidden + 1);
+
+if nargin == 6
+ net.beta = beta;
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpbkp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpbkp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,37 @@
+function g = mlpbkp(net, x, z, deltas)
+%MLPBKP Backpropagate gradient of error function for 2-layer network.
+%
+% Description
+% G = MLPBKP(NET, X, Z, DELTAS) takes a network data structure NET
+% together with a matrix X of input vectors, a matrix Z of hidden unit
+% activations, and a matrix DELTAS of the gradient of the error
+% function with respect to the values of the output units (i.e. the
+% summed inputs to the output units, before the activation function is
+% applied). The return value is the gradient G of the error function
+% with respect to the network weights. Each row of X corresponds to one
+% input vector.
+%
+% This function is provided so that the common backpropagation
+% algorithm can be used by multi-layer perceptron network models to
+% compute gradients for mixture density networks as well as standard
+% error functions.
+%
+% See also
+% MLP, MLPGRAD, MLPDERIV, MDNGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Evaluate second-layer gradients.
+gw2 = z'*deltas;
+gb2 = sum(deltas, 1);
+
+% Now do the backpropagation.
+delhid = deltas*net.w2';
+delhid = delhid.*(1.0 - z.*z);
+
+% Finally, evaluate the first-layer gradients.
+gw1 = x'*delhid;
+gb1 = sum(delhid, 1);
+
+g = [gw1(:)', gb1, gw2(:)', gb2];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpderiv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpderiv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,47 @@
+function g = mlpderiv(net, x)
+%MLPDERIV Evaluate derivatives of network outputs with respect to weights.
+%
+% Description
+% G = MLPDERIV(NET, X) takes a network data structure NET and a matrix
+% of input vectors X and returns a three-index matrix G whose I, J, K
+% element contains the derivative of network output K with respect to
+% weight or bias parameter J for input pattern I. The ordering of the
+% weight and bias parameters is defined by MLPUNPAK.
+%
+% See also
+% MLP, MLPPAK, MLPGRAD, MLPBKP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+[y, z] = mlpfwd(net, x);
+
+ndata = size(x, 1);
+
+if isfield(net, 'mask')
+ nwts = size(find(net.mask), 1);
+ temp = zeros(1, net.nwts);
+else
+ nwts = net.nwts;
+end
+
+g = zeros(ndata, nwts, net.nout);
+for k = 1 : net.nout
+ delta = zeros(1, net.nout);
+ delta(1, k) = 1;
+ for n = 1 : ndata
+ if isfield(net, 'mask')
+ temp = mlpbkp(net, x(n, :), z(n, :), delta);
+ g(n, :, k) = temp(logical(net.mask));
+ else
+ g(n, :, k) = mlpbkp(net, x(n, :), z(n, :),...
+ delta);
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlperr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlperr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function [e, edata, eprior] = mlperr(net, x, t)
+%MLPERR Evaluate error function for 2-layer network.
+%
+% Description
+% E = MLPERR(NET, X, T) takes a network data structure NET together
+% with a matrix X of input vectors and a matrix T of target vectors,
+% and evaluates the error function E. The choice of error function
+% corresponds to the output unit activation function. Each row of X
+% corresponds to one input vector and each row of T corresponds to one
+% target vector.
+%
+% [E, EDATA, EPRIOR] = MLPERR(NET, X, T) additionally returns the data
+% and prior components of the error, assuming a zero mean Gaussian
+% prior on the weights with inverse variance parameters ALPHA and BETA
+% taken from the network data structure NET.
+%
+% See also
+% MLP, MLPPAK, MLPUNPAK, MLPFWD, MLPBKP, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+[y, z, a] = mlpfwd(net, x);
+
+switch net.outfn
+
+ case 'linear' % Linear outputs
+ edata = 0.5*sum(sum((y - t).^2));
+
+ case 'logistic' % Logistic outputs
+ % Ensure that log(1-y) is computable: need exp(a) > eps
+ maxcut = -log(eps);
+ % Ensure that log(y) is computable
+ mincut = -log(1/realmin - 1);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ y = 1./(1 + exp(-a));
+ edata = - sum(sum(t.*log(y) + (1 - t).*log(1 - y)));
+
+ case 'softmax' % Softmax outputs
+ nout = size(a,2);
+ % Ensure that sum(exp(a), 2) does not overflow
+ maxcut = log(realmax) - log(nout);
+ % Ensure that exp(a) > 0
+ mincut = log(realmin);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ temp = exp(a);
+ y = temp./(sum(temp, 2)*ones(1,nout));
+ % Ensure that log(y) is computable
+ y(y<realmin) = realmin;
+ edata = - sum(sum(t.*log(y)));
+ otherwise
+ error(['Unknown activation function ', net.outfn]);
+end
+
+[e, edata, eprior] = errbayes(net, edata);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function [y, z, a] = mlpfwd(net, x)
+%MLPFWD Forward propagation through 2-layer network.
+%
+% Description
+% Y = MLPFWD(NET, X) takes a network data structure NET together with a
+% matrix X of input vectors, and forward propagates the inputs through
+% the network to generate a matrix Y of output vectors. Each row of X
+% corresponds to one input vector and each row of Y corresponds to one
+% output vector.
+%
+% [Y, Z] = MLPFWD(NET, X) also generates a matrix Z of the hidden unit
+% activations where each row corresponds to one pattern.
+%
+% [Y, Z, A] = MLPFWD(NET, X) also returns a matrix A giving the summed
+% inputs to each output unit, where each row corresponds to one
+% pattern.
+%
+% See also
+% MLP, MLPPAK, MLPUNPAK, MLPERR, MLPBKP, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+ndata = size(x, 1);
+
+z = tanh(x*net.w1 + ones(ndata, 1)*net.b1);
+a = z*net.w2 + ones(ndata, 1)*net.b2;
+
+switch net.outfn
+
+ case 'linear' % Linear outputs
+
+ y = a;
+
+ case 'logistic' % Logistic outputs
+ % Prevent overflow and underflow: use same bounds as mlperr
+ % Ensure that log(1-y) is computable: need exp(a) > eps
+ maxcut = -log(eps);
+ % Ensure that log(y) is computable
+ mincut = -log(1/realmin - 1);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ y = 1./(1 + exp(-a));
+
+ case 'softmax' % Softmax outputs
+
+ % Prevent overflow and underflow: use same bounds as glmerr
+ % Ensure that sum(exp(a), 2) does not overflow
+ maxcut = log(realmax) - log(net.nout);
+ % Ensure that exp(a) > 0
+ mincut = log(realmin);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ temp = exp(a);
+ y = temp./(sum(temp, 2)*ones(1, net.nout));
+
+ otherwise
+ error(['Unknown activation function ', net.outfn]);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpgrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpgrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,33 @@
+function [g, gdata, gprior] = mlpgrad(net, x, t)
+%MLPGRAD Evaluate gradient of error function for 2-layer network.
+%
+% Description
+% G = MLPGRAD(NET, X, T) takes a network data structure NET together
+% with a matrix X of input vectors and a matrix T of target vectors,
+% and evaluates the gradient G of the error function with respect to
+% the network weights. The error funcion corresponds to the choice of
+% output unit activation function. Each row of X corresponds to one
+% input vector and each row of T corresponds to one target vector.
+%
+% [G, GDATA, GPRIOR] = MLPGRAD(NET, X, T) also returns separately the
+% data and prior contributions to the gradient. In the case of multiple
+% groups in the prior, GPRIOR is a matrix with a row for each group and
+% a column for each weight parameter.
+%
+% See also
+% MLP, MLPPAK, MLPUNPAK, MLPFWD, MLPERR, MLPBKP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+[y, z] = mlpfwd(net, x);
+delout = y - t;
+
+gdata = mlpbkp(net, x, z, delout);
+
+[g, gdata, gprior] = gbayes(net, gdata);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlphdotv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlphdotv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,79 @@
+function hdv = mlphdotv(net, x, t, v)
+%MLPHDOTV Evaluate the product of the data Hessian with a vector.
+%
+% Description
+%
+% HDV = MLPHDOTV(NET, X, T, V) takes an MLP network data structure NET,
+% together with the matrix X of input vectors, the matrix T of target
+% vectors and an arbitrary row vector V whose length equals the number
+% of parameters in the network, and returns the product of the data-
+% dependent contribution to the Hessian matrix with V. The
+% implementation is based on the R-propagation algorithm of
+% Pearlmutter.
+%
+% See also
+% MLP, MLPHESS, HESSCHEK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+ndata = size(x, 1);
+
+[y, z] = mlpfwd(net, x); % Standard forward propagation.
+zprime = (1 - z.*z); % Hidden unit first derivatives.
+zpprime = -2.0*z.*zprime; % Hidden unit second derivatives.
+
+vnet = mlpunpak(net, v); % Unpack the v vector.
+
+% Do the R-forward propagation.
+
+ra1 = x*vnet.w1 + ones(ndata, 1)*vnet.b1;
+rz = zprime.*ra1;
+ra2 = rz*net.w2 + z*vnet.w2 + ones(ndata, 1)*vnet.b2;
+
+switch net.outfn
+
+ case 'linear' % Linear outputs
+
+ ry = ra2;
+
+ case 'logistic' % Logistic outputs
+
+ ry = y.*(1 - y).*ra2;
+
+ case 'softmax' % Softmax outputs
+
+ nout = size(t, 2);
+ ry = y.*ra2 - y.*(sum(y.*ra2, 2)*ones(1, nout));
+
+ otherwise
+ error(['Unknown activation function ', net.outfn]);
+end
+
+% Evaluate delta for the output units.
+
+delout = y - t;
+
+% Do the standard backpropagation.
+
+delhid = zprime.*(delout*net.w2');
+
+% Now do the R-backpropagation.
+
+rdelhid = zpprime.*ra1.*(delout*net.w2') + zprime.*(delout*vnet.w2') + ...
+ zprime.*(ry*net.w2');
+
+% Finally, evaluate the components of hdv and then merge into long vector.
+
+hw1 = x'*rdelhid;
+hb1 = sum(rdelhid, 1);
+hw2 = z'*ry + rz'*delout;
+hb2 = sum(ry, 1);
+
+hdv = [hw1(:)', hb1, hw2(:)', hb2];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlphess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlphess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,51 @@
+function [h, hdata] = mlphess(net, x, t, hdata)
+%MLPHESS Evaluate the Hessian matrix for a multi-layer perceptron network.
+%
+% Description
+% H = MLPHESS(NET, X, T) takes an MLP network data structure NET, a
+% matrix X of input values, and a matrix T of target values and returns
+% the full Hessian matrix H corresponding to the second derivatives of
+% the negative log posterior distribution, evaluated for the current
+% weight and bias values as defined by NET.
+%
+% [H, HDATA] = MLPHESS(NET, X, T) returns both the Hessian matrix H and
+% the contribution HDATA arising from the data dependent term in the
+% Hessian.
+%
+% H = MLPHESS(NET, X, T, HDATA) takes a network data structure NET, a
+% matrix X of input values, and a matrix T of target values, together
+% with the contribution HDATA arising from the data dependent term in
+% the Hessian, and returns the full Hessian matrix H corresponding to
+% the second derivatives of the negative log posterior distribution.
+% This version saves computation time if HDATA has already been
+% evaluated for the current weight and bias values.
+%
+% See also
+% MLP, HESSCHEK, MLPHDOTV, EVIDENCE
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if nargin == 3
+ % Data term in Hessian needs to be computed
+ hdata = datahess(net, x, t);
+end
+
+[h, hdata] = hbayes(net, hdata);
+
+% Sub-function to compute data part of Hessian
+function hdata = datahess(net, x, t)
+
+hdata = zeros(net.nwts, net.nwts);
+
+for v = eye(net.nwts);
+ hdata(find(v),:) = mlphdotv(net, x, t, v);
+end
+
+return
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlphint.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlphint.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,92 @@
+function [h1, h2] = mlphint(net);
+%MLPHINT Plot Hinton diagram for 2-layer feed-forward network.
+%
+% Description
+%
+% MLPHINT(NET) takes a network structure NET and plots the Hinton
+% diagram comprised of two figure windows, one displaying the first-
+% layer weights and biases, and one displaying the second-layer weights
+% and biases.
+%
+% [H1, H2] = MLPHINT(NET) also returns handles H1 and H2 to the
+% figures which can be used, for instance, to delete the figures when
+% they are no longer needed.
+%
+% To print the figure correctly, you should call SET(H,
+% 'INVERTHARDCOPY', 'ON') before printing.
+%
+% See also
+% DEMHINT, HINTMAT, MLP, MLPPAK, MLPUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set scale to be up to 0.9 of maximum absolute weight value, where scale
+% defined so that area of box proportional to weight value.
+
+% Use no more than 640x480 pixels
+xmax = 640; ymax = 480;
+
+% Offset bottom left hand corner
+x01 = 40; y01 = 40;
+x02 = 80; y02 = 80;
+
+% Need to allow 5 pixels border for window frame: but 30 at top
+border = 5;
+top_border = 30;
+
+ymax = ymax - top_border;
+xmax = xmax - border;
+
+% First layer
+
+wb1 = [net.w1; net.b1];
+[xvals, yvals, color] = hintmat(wb1');
+% Try to preserve aspect ratio approximately
+if (8*net.nhidden < 6*(net.nin + 1))
+ delx = xmax; dely = xmax*net.nhidden/(net.nin + 1);
+else
+ delx = ymax*(net.nin + 1)/net.nhidden; dely = ymax;
+end
+
+h1 = figure('Color', [0.5 0.5 0.5], ...
+ 'Name', 'Hinton diagram: first-layer weights and biases', ...
+ 'NumberTitle', 'off', ...
+ 'Colormap', [0 0 0; 1 1 1], ...
+ 'Units', 'pixels', ...
+ 'Position', [x01 y01 delx dely]);
+set(gca, 'Visible', 'off', 'Position', [0 0 1 1]);
+hold on
+
+cmap = [0 0 0; 1 1 1];
+colors(1, :, :) = cmap(color, :);
+patch(xvals', yvals', colors, 'Edgecolor', 'none');
+axis equal;
+xpos = net.nin;
+line([xpos xpos], [0 net.nhidden], 'color', 'red', 'linewidth', 3);
+
+% Second layer
+
+wb2 = [net.w2; net.b2];
+[xvals, yvals, color] = hintmat(wb2');
+if (8*net.nout < 6*(net.nhidden + 1))
+ delx = xmax; dely = xmax*net.nout/(net.nhidden + 1);
+else
+ delx = ymax*(net.nhidden + 1)/net.nout; dely = ymax;
+end
+
+h2 = figure('Color', [0.5 0.5 0.5], ...
+ 'Name', 'Hinton diagram: second-layer weights and biases', ...
+ 'NumberTitle', 'off', ...
+ 'Colormap', [0 0 0; 1 1 1], ...
+ 'Units', 'pixels', ...
+ 'Position', [x02 y02 delx dely]);
+set(gca, 'Visible', 'off', 'Position', [0 0 1 1]);
+
+hold on
+colors2(1, :, :) = cmap(color, :);
+patch(xvals', yvals', colors2, 'Edgecolor', 'none');
+axis equal;
+xpos = net.nhidden;
+line([xpos xpos], [0 net.nout], 'color', 'red', 'linewidth', 3);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpinit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpinit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function net = mlpinit(net, prior)
+%MLPINIT Initialise the weights in a 2-layer feedforward network.
+%
+% Description
+%
+% NET = MLPINIT(NET, PRIOR) takes a 2-layer feedforward network NET and
+% sets the weights and biases by sampling from a Gaussian distribution.
+% If PRIOR is a scalar, then all of the parameters (weights and biases)
+% are sampled from a single isotropic Gaussian with inverse variance
+% equal to PRIOR. If PRIOR is a data structure of the kind generated by
+% MLPPRIOR, then the parameters are sampled from multiple Gaussians
+% according to their groupings (defined by the INDEX field) with
+% corresponding variances (defined by the ALPHA field).
+%
+% See also
+% MLP, MLPPRIOR, MLPPAK, MLPUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if isstruct(prior)
+ sig = 1./sqrt(prior.index*prior.alpha);
+ w = sig'.*randn(1, net.nwts);
+elseif size(prior) == [1 1]
+ w = randn(1, net.nwts).*sqrt(1/prior);
+else
+ error('prior must be a scalar or a structure');
+end
+
+net = mlpunpak(net, w);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlppak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlppak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,31 @@
+function w = mlppak(net)
+%MLPPAK Combines weights and biases into one weights vector.
+%
+% Description
+% W = MLPPAK(NET) takes a network data structure NET and combines the
+% component weight matrices and bias vectors into a single row vector W.
+% The facility to switch between these two representations for the
+% network parameters is useful, for example, in training a network by
+% error function minimization, since a single vector of parameters can
+% be handled by general-purpose optimization routines.
+%
+% The ordering of the parameters in W is defined by
+% w = [net.w1(:)', net.b1, net.w2(:)', net.b2];
+% where W1 is the first-layer weight matrix, B1 is the first-layer
+% bias vector, W2 is the second-layer weight matrix, and B2 is the
+% second-layer bias vector.
+%
+% See also
+% MLP, MLPUNPAK, MLPFWD, MLPERR, MLPBKP, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp');
+if ~isempty(errstring);
+ error(errstring);
+end
+
+w = [net.w1(:)', net.b1, net.w2(:)', net.b2];
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpprior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpprior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,62 @@
+function prior = mlpprior(nin, nhidden, nout, aw1, ab1, aw2, ab2)
+%MLPPRIOR Create Gaussian prior for mlp.
+%
+% Description
+% PRIOR = MLPPRIOR(NIN, NHIDDEN, NOUT, AW1, AB1, AW2, AB2) generates a
+% data structure PRIOR, with fields PRIOR.ALPHA and PRIOR.INDEX, which
+% specifies a Gaussian prior distribution for the network weights in a
+% two-layer feedforward network. Two different cases are possible. In
+% the first case, AW1, AB1, AW2 and AB2 are all scalars and represent
+% the regularization coefficients for four groups of parameters in the
+% network corresponding to first-layer weights, first-layer biases,
+% second-layer weights, and second-layer biases respectively. Then
+% PRIOR.ALPHA represents a column vector of length 4 containing the
+% parameters, and PRIOR.INDEX is a matrix specifying which weights
+% belong in each group. Each column has one element for each weight in
+% the matrix, using the standard ordering as defined in MLPPAK, and
+% each element is 1 or 0 according to whether the weight is a member of
+% the corresponding group or not. In the second case the parameter AW1
+% is a vector of length equal to the number of inputs in the network,
+% and the corresponding matrix PRIOR.INDEX now partitions the first-
+% layer weights into groups corresponding to the weights fanning out of
+% each input unit. This prior is appropriate for the technique of
+% automatic relevance determination.
+%
+% See also
+% MLP, MLPERR, MLPGRAD, EVIDENCE
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+nextra = nhidden + (nhidden + 1)*nout;
+nwts = nin*nhidden + nextra;
+
+if size(aw1) == [1,1]
+
+ indx = [ones(1, nin*nhidden), zeros(1, nextra)]';
+
+elseif size(aw1) == [1, nin]
+
+ indx = kron(ones(nhidden, 1), eye(nin));
+ indx = [indx; zeros(nextra, nin)];
+
+else
+
+ error('Parameter aw1 of invalid dimensions');
+
+end
+
+extra = zeros(nwts, 3);
+
+mark1 = nin*nhidden;
+mark2 = mark1 + nhidden;
+extra(mark1 + 1:mark2, 1) = ones(nhidden,1);
+mark3 = mark2 + nhidden*nout;
+extra(mark2 + 1:mark3, 2) = ones(nhidden*nout,1);
+mark4 = mark3 + nout;
+extra(mark3 + 1:mark4, 3) = ones(nout,1);
+
+indx = [indx, extra];
+
+prior.index = indx;
+prior.alpha = [aw1, ab1, aw2, ab2]';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlptrain.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlptrain.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,24 @@
+function [net, error] = mlptrain(net, x, t, its);
+%MLPTRAIN Utility to train an MLP network for demtrain
+%
+% Description
+%
+% [NET, ERROR] = MLPTRAIN(NET, X, T, ITS) trains a network data
+% structure NET using the scaled conjugate gradient algorithm for ITS
+% cycles with input data X, target data T.
+%
+% See also
+% DEMTRAIN, SCG, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+options = zeros(1,18);
+options(1) = -1; % To prevent any messages at all
+options(9) = 0;
+options(14) = its;
+
+[net, options] = netopt(net, options, x, t, 'scg');
+
+error = options(8);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/mlpunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/mlpunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function net = mlpunpak(net, w)
+%MLPUNPAK Separates weights vector into weight and bias matrices.
+%
+% Description
+% NET = MLPUNPAK(NET, W) takes an mlp network data structure NET and a
+% weight vector W, and returns a network data structure identical to
+% the input network, except that the first-layer weight matrix W1, the
+% first-layer bias vector B1, the second-layer weight matrix W2 and the
+% second-layer bias vector B2 have all been set to the corresponding
+% elements of W.
+%
+% See also
+% MLP, MLPPAK, MLPFWD, MLPERR, MLPBKP, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp');
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if net.nwts ~= length(w)
+ error('Invalid weight vector length')
+end
+
+nin = net.nin;
+nhidden = net.nhidden;
+nout = net.nout;
+
+mark1 = nin*nhidden;
+net.w1 = reshape(w(1:mark1), nin, nhidden);
+mark2 = mark1 + nhidden;
+net.b1 = reshape(w(mark1 + 1: mark2), 1, nhidden);
+mark3 = mark2 + nhidden*nout;
+net.w2 = reshape(w(mark2 + 1: mark3), nhidden, nout);
+mark4 = mark3 + nout;
+net.b2 = reshape(w(mark3 + 1: mark4), 1, nout);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netderiv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netderiv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function g = netderiv(w, net, x)
+%NETDERIV Evaluate derivatives of network outputs by weights generically.
+%
+% Description
+%
+% G = NETDERIV(W, NET, X) takes a weight vector W and a network data
+% structure NET, together with the matrix X of input vectors, and
+% returns the gradient of the outputs with respect to the weights
+% evaluated at W.
+%
+% See also
+% NETEVFWD, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+fstr = [net.type, 'deriv'];
+net = netunpak(net, w);
+g = feval(fstr, net, x);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/neterr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/neterr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function [e, varargout] = neterr(w, net, x, t)
+%NETERR Evaluate network error function for generic optimizers
+%
+% Description
+%
+% E = NETERR(W, NET, X, T) takes a weight vector W and a network data
+% structure NET, together with the matrix X of input vectors and the
+% matrix T of target vectors, and returns the value of the error
+% function evaluated at W.
+%
+% [E, VARARGOUT] = NETERR(W, NET, X, T) also returns any additional
+% return values from the error function.
+%
+% See also
+% NETGRAD, NETHESS, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstr = [net.type, 'err'];
+net = netunpak(net, w);
+
+[s{1:nargout}] = feval(errstr, net, x, t);
+e = s{1};
+if nargout > 1
+ for i = 2:nargout
+ varargout{i-1} = s{i};
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netevfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netevfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function [y, extra, invhess] = netevfwd(w, net, x, t, x_test, invhess)
+%NETEVFWD Generic forward propagation with evidence for network
+%
+% Description
+% [Y, EXTRA] = NETEVFWD(W, NET, X, T, X_TEST) takes a network data
+% structure NET together with the input X and target T training data
+% and input test data X_TEST. It returns the normal forward propagation
+% through the network Y together with a matrix EXTRA which consists of
+% error bars (variance) for a regression problem or moderated outputs
+% for a classification problem.
+%
+% The optional argument (and return value) INVHESS is the inverse of
+% the network Hessian computed on the training data inputs and targets.
+% Passing it in avoids recomputing it, which can be a significant
+% saving for large training sets.
+%
+% See also
+% MLPEVFWD, RBFEVFWD, GLMEVFWD, FEVBAYES
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+func = [net.type, 'evfwd'];
+net = netunpak(net, w);
+if nargin == 5
+ [y, extra, invhess] = feval(func, net, x, t, x_test);
+else
+ [y, extra, invhess] = feval(func, net, x, t, x_test, invhess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netgrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netgrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function g = netgrad(w, net, x, t)
+%NETGRAD Evaluate network error gradient for generic optimizers
+%
+% Description
+%
+% G = NETGRAD(W, NET, X, T) takes a weight vector W and a network data
+% structure NET, together with the matrix X of input vectors and the
+% matrix T of target vectors, and returns the gradient of the error
+% function evaluated at W.
+%
+% See also
+% MLP, NETERR, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+gradstr = [net.type, 'grad'];
+
+net = netunpak(net, w);
+
+g = feval(gradstr, net, x, t);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/nethess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/nethess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function [h, varargout] = nethess(w, net, x, t, varargin)
+%NETHESS Evaluate network Hessian
+%
+% Description
+%
+% H = NETHESS(W, NET, X, T) takes a weight vector W and a network data
+% structure NET, together with the matrix X of input vectors and the
+% matrix T of target vectors, and returns the value of the Hessian
+% evaluated at W.
+%
+% [E, VARARGOUT] = NETHESS(W, NET, X, T, VARARGIN) also returns any
+% additional return values from the network Hessian function, and
+% passes additional arguments to that function.
+%
+% See also
+% NETERR, NETGRAD, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+hess_str = [net.type, 'hess'];
+
+net = netunpak(net, w);
+
+[s{1:nargout}] = feval(hess_str, net, x, t, varargin{:});
+h = s{1};
+for i = 2:nargout
+ varargout{i-1} = s{i};
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netinit.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netinit.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,45 @@
+function net = netinit(net, prior)
+%NETINIT Initialise the weights in a network.
+%
+% Description
+%
+% NET = NETINIT(NET, PRIOR) takes a network data structure NET and sets
+% the weights and biases by sampling from a Gaussian distribution. If
+% PRIOR is a scalar, then all of the parameters (weights and biases)
+% are sampled from a single isotropic Gaussian with inverse variance
+% equal to PRIOR. If PRIOR is a data structure of the kind generated by
+% MLPPRIOR, then the parameters are sampled from multiple Gaussians
+% according to their groupings (defined by the INDEX field) with
+% corresponding variances (defined by the ALPHA field).
+%
+% See also
+% MLPPRIOR, NETUNPAK, RBFPRIOR
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+if isstruct(prior)
+ if (isfield(net, 'mask'))
+ if find(sum(prior.index, 2)) ~= find(net.mask)
+ error('Index does not match mask');
+ end
+ sig = sqrt(prior.index*prior.alpha);
+ % Weights corresponding to zeros in mask will not be used anyway
+ % Set their priors to one to avoid division by zero
+ sig = sig + (sig == 0);
+ sig = 1./sqrt(sig);
+ else
+ sig = 1./sqrt(prior.index*prior.alpha);
+ end
+ w = sig'.*randn(1, net.nwts);
+elseif size(prior) == [1 1]
+ w = randn(1, net.nwts).*sqrt(1/prior);
+else
+ error('prior must be a scalar or a structure');
+end
+
+if (isfield(net, 'mask'))
+ w = w(logical(net.mask));
+end
+net = netunpak(net, w);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netlab3.3.zip
Binary file toolboxes/FullBNT-1.0.7/netlab3.3/netlab3.3.zip has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netlogo.mat
Binary file toolboxes/FullBNT-1.0.7/netlab3.3/netlogo.mat has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netopt.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netopt.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [net, options, varargout] = netopt(net, options, x, t, alg);
+%NETOPT Optimize the weights in a network model.
+%
+% Description
+%
+% NETOPT is a helper function which facilitates the training of
+% networks using the general purpose optimizers as well as sampling
+% from the posterior distribution of parameters using general purpose
+% Markov chain Monte Carlo sampling algorithms. It can be used with any
+% function that searches in parameter space using error and gradient
+% functions.
+%
+% [NET, OPTIONS] = NETOPT(NET, OPTIONS, X, T, ALG) takes a network
+% data structure NET, together with a vector OPTIONS of parameters
+% governing the behaviour of the optimization algorithm, a matrix X of
+% input vectors and a matrix T of target vectors, and returns the
+% trained network as well as an updated OPTIONS vector. The string ALG
+% determines which optimization algorithm (CONJGRAD, QUASINEW, SCG,
+% etc.) or Monte Carlo algorithm (such as HMC) will be used.
+%
+% [NET, OPTIONS, VARARGOUT] = NETOPT(NET, OPTIONS, X, T, ALG) also
+% returns any additional return values from the optimisation algorithm.
+%
+% See also
+% NETGRAD, BFGS, CONJGRAD, GRADDESC, HMC, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+optstring = [alg, '(''neterr'', w, options, ''netgrad'', net, x, t)'];
+
+% Extract weights from network as single vector
+w = netpak(net);
+
+% Carry out optimisation
+[s{1:nargout}] = eval(optstring);
+w = s{1};
+
+if nargout > 1
+ options = s{2};
+
+ % If there are additional arguments, extract them
+ nextra = nargout - 2;
+ if nextra > 0
+ for i = 1:nextra
+ varargout{i} = s{i+2};
+ end
+ end
+end
+
+% Pack the weights back into the network
+net = netunpak(net, w);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,25 @@
+function w = netpak(net)
+%NETPAK Combines weights and biases into one weights vector.
+%
+% Description
+% W = NETPAK(NET) takes a network data structure NET and combines the
+% component weight matrices into a single row vector W. The facility
+% to switch between these two representations for the network
+% parameters is useful, for example, in training a network by error
+% function minimization, since a single vector of parameters can be
+% handled by general-purpose optimization routines. This function also
+% takes into account a MASK defined as a field in NET by removing any
+% weights that correspond to entries of 0 in the mask.
+%
+% See also
+% NET, NETUNPAK, NETFWD, NETERR, NETGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+pakstr = [net.type, 'pak'];
+w = feval(pakstr, net);
+% Return masked subset of weights
+if (isfield(net, 'mask'))
+ w = w(logical(net.mask));
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/netunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/netunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,34 @@
+function net = netunpak(net, w)
+%NETUNPAK Separates weights vector into weight and bias matrices.
+%
+% Description
+% NET = NETUNPAK(NET, W) takes a network data structure NET and a
+% weight vector W, and returns a network data structure identical to
+% the input network, except that the component weight matrices have
+% all been set to the corresponding elements of W. If there is a MASK
+% field in the NET data structure, then the weights in W are placed in
+% locations corresponding to non-zero entries in the mask (so W should
+% have the same length as the number of non-zero entries in the MASK).
+%
+% See also
+% NETPAK, NETFWD, NETERR, NETGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+unpakstr = [net.type, 'unpak'];
+
+% Check if we are being passed a masked set of weights
+if (isfield(net, 'mask'))
+ if length(w) ~= size(find(net.mask), 1)
+ error('Weight vector length does not match mask length')
+ end
+ % Do a full pack of all current network weights
+ pakstr = [net.type, 'pak'];
+ fullw = feval(pakstr, net);
+ % Replace current weights with new ones
+ fullw(logical(net.mask)) = w;
+ w = fullw;
+end
+
+net = feval(unpakstr, net, w);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/oilTrn.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/oilTrn.dat Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,503 @@
+ nin 12
+ nout 2
+ ndata 500
+ 3.315000e-01 2.156000e-01 6.802000e-01 1.434000e-01 6.825000e-01 2.720000e-01 6.223000e-01 2.092000e-01 7.961000e-01 1.530000e-01 5.856000e-01 2.573000e-01 3.440000e-01 1.401000e-01
+ 9.390000e-02 1.008900e+00 3.650000e-02 6.944000e-01 9.080000e-02 4.961000e-01 7.220000e-02 6.521000e-01 -1.300000e-02 6.085000e-01 6.310000e-02 6.597000e-01 5.140000e-02 4.459000e-01
+ 5.184000e-01 2.283000e-01 5.300000e-01 6.884000e-01 7.456000e-01 6.171000e-01 6.136000e-01 5.928000e-01 7.678000e-01 6.130000e-01 6.705000e-01 5.202000e-01 3.710000e-01 3.214000e-01
+ 4.208000e-01 6.740000e-01 1.651000e-01 7.592000e-01 1.810000e-01 5.448000e-01 1.707000e-01 7.554000e-01 1.635000e-01 5.492000e-01 2.598000e-01 6.455000e-01 1.667000e-01 4.177000e-01
+ 3.130000e-01 6.465000e-01 5.908000e-01 6.924000e-01 7.664000e-01 6.262000e-01 1.717700e+00 1.500000e-02 8.510000e-02 1.904600e+00 -1.650000e-02 2.210000e-02 3.378000e-01 4.184000e-01
+ 1.145800e+00 -4.670000e-02 4.056000e-01 5.662000e-01 3.123000e-01 4.580000e-01 3.636000e-01 6.134000e-01 3.305000e-01 4.132000e-01 4.167000e-01 5.514000e-01 3.249000e-01 2.790000e-01
+-1.900000e-03 1.732000e-01 5.700000e-03 4.882000e-01 2.076000e-01 3.910000e-01 8.600000e-03 1.719800e+00 2.150000e-02 -2.580000e-02 6.730000e-02 -8.290000e-02 5.100000e-02 2.123000e-01
+ 7.800000e-03 4.615000e-01 1.181000e-01 6.590000e-01 2.587000e-01 6.352000e-01 -1.910000e-02 1.749100e+00 1.098000e-01 -1.315000e-01 4.070000e-02 -4.850000e-02 6.210000e-02 3.856000e-01
+ 7.305000e-01 1.189000e-01 1.062600e+00 1.013000e-01 1.138500e+00 1.486000e-01 1.763100e+00 -4.160000e-02 2.073100e+00 -8.720000e-02 -4.390000e-02 5.300000e-02 6.070000e-01 8.130000e-02
+ 6.199000e-01 4.779000e-01 9.528000e-01 7.463000e-01 9.741000e-01 8.633000e-01 1.677100e+00 6.200000e-02 2.027100e+00 -3.010000e-02 1.510000e-02 1.714700e+00 5.065000e-01 4.589000e-01
+ 1.130800e+00 -2.860000e-02 1.000500e+00 7.387000e-01 8.390000e-01 1.874000e-01 1.084800e+00 6.374000e-01 8.534000e-01 1.373000e-01 1.042200e+00 6.836000e-01 6.215000e-01 1.323000e-01
+ 5.093000e-01 6.038000e-01 2.874000e-01 8.162000e-01 2.267000e-01 6.181000e-01 2.646000e-01 8.490000e-01 3.365000e-01 4.648000e-01 2.815000e-01 8.254000e-01 2.280000e-01 4.243000e-01
+ 7.600000e-02 5.010000e-01 1.870000e-01 7.011000e-01 1.728000e-01 8.475000e-01 2.300000e-01 6.536000e-01 1.616000e-01 8.732000e-01 1.603000e-01 7.331000e-01 9.550000e-02 4.203000e-01
+ 4.662000e-01 1.209000e-01 7.657000e-01 1.390000e-01 8.886000e-01 1.304000e-01 1.694000e+00 4.530000e-02 -1.170000e-02 2.011100e+00 -1.940000e-02 2.170000e-02 4.422000e-01 7.900000e-02
+ 1.125400e+00 -2.520000e-02 6.776000e-01 1.793000e-01 4.287000e-01 2.804000e-01 6.003000e-01 2.737000e-01 4.541000e-01 2.370000e-01 6.775000e-01 1.850000e-01 4.369000e-01 1.284000e-01
+ 3.334000e-01 5.468000e-01 6.072000e-01 7.549000e-01 6.294000e-01 9.322000e-01 5.850000e-01 7.859000e-01 6.324000e-01 9.568000e-01 6.026000e-01 7.616000e-01 3.277000e-01 4.629000e-01
+ 3.760000e-02 7.113000e-01 4.992000e-01 5.377000e-01 6.049000e-01 5.530000e-01 1.663500e+00 8.210000e-02 -1.890000e-02 2.024100e+00 1.400000e-03 -3.000000e-03 2.534000e-01 3.568000e-01
+ 1.703000e-01 2.234000e-01 2.677000e-01 3.477000e-01 2.734000e-01 4.324000e-01 1.222000e-01 5.213000e-01 2.333000e-01 4.926000e-01 2.044000e-01 4.215000e-01 1.407000e-01 2.175000e-01
+ 1.328000e-01 2.144000e-01 2.466000e-01 2.912000e-01 3.244000e-01 2.815000e-01 1.586000e-01 3.982000e-01 3.165000e-01 3.005000e-01 3.038000e-01 2.239000e-01 1.300000e-01 1.831000e-01
+ 1.106900e+00 -1.500000e-03 1.787100e+00 -7.230000e-02 1.038900e+00 7.086000e-01 1.672800e+00 7.010000e-02 1.008300e+00 5.901000e-01 1.740500e+00 -1.880000e-02 7.498000e-01 2.096000e-01
+ 1.083700e+00 2.940000e-02 6.675000e-01 5.421000e-01 5.040000e-01 3.997000e-01 7.271000e-01 4.673000e-01 6.061000e-01 2.526000e-01 7.957000e-01 3.861000e-01 4.794000e-01 1.997000e-01
+ 6.379000e-01 3.709000e-01 9.139000e-01 6.785000e-01 1.117100e+00 6.825000e-01 9.074000e-01 6.917000e-01 1.152700e+00 6.739000e-01 9.866000e-01 5.957000e-01 5.590000e-01 3.562000e-01
+ 6.520000e-02 4.575000e-01 5.170000e-01 2.900000e-01 6.257000e-01 3.041000e-01 1.700700e+00 3.410000e-02 -1.700000e-03 2.400000e-03 -4.430000e-02 5.270000e-02 2.800000e-01 1.838000e-01
+ 5.460000e-02 4.317000e-01 2.260000e-02 8.026000e-01 2.228000e-01 7.077000e-01 1.370000e-02 1.717800e+00 -5.640000e-02 6.540000e-02 -9.100000e-03 1.070000e-02 7.470000e-02 3.874000e-01
+ 6.250000e-02 1.036100e+00 -3.160000e-02 9.835000e-01 1.020000e-02 7.331000e-01 -5.260000e-02 1.005500e+00 -5.360000e-02 7.915000e-01 -2.270000e-02 9.754000e-01 3.200000e-03 5.922000e-01
+ 8.813000e-01 2.321000e-01 4.637000e-01 1.117000e-01 2.749000e-01 2.270000e-01 4.328000e-01 1.497000e-01 3.223000e-01 1.589000e-01 4.639000e-01 1.134000e-01 2.964000e-01 1.282000e-01
+ 2.830000e-02 1.087400e+00 1.069000e-01 6.261000e-01 1.089000e-01 4.879000e-01 -3.670000e-02 7.978000e-01 -6.910000e-02 6.871000e-01 4.170000e-02 7.048000e-01 3.750000e-02 4.698000e-01
+ 1.116000e+00 -1.490000e-02 1.075900e+00 6.281000e-01 6.518000e-01 9.179000e-01 9.002000e-01 8.356000e-01 6.325000e-01 8.442000e-01 8.748000e-01 8.595000e-01 5.796000e-01 3.492000e-01
+ 3.371000e-01 7.714000e-01 7.103000e-01 8.678000e-01 8.329000e-01 8.652000e-01 1.701600e+00 3.270000e-02 -1.300000e-01 2.157200e+00 -8.280000e-02 1.834700e+00 3.773000e-01 5.367000e-01
+ 7.327000e-01 3.195000e-01 9.908000e-01 6.799000e-01 1.154500e+00 7.437000e-01 1.039000e+00 6.200000e-01 1.251800e+00 6.617000e-01 9.425000e-01 7.395000e-01 5.773000e-01 3.877000e-01
+ 3.568000e-01 4.784000e-01 7.597000e-01 3.709000e-01 7.185000e-01 5.614000e-01 1.689700e+00 4.820000e-02 -5.500000e-02 2.069800e+00 1.110000e-02 -1.070000e-02 3.974000e-01 2.757000e-01
+ 8.451000e-01 2.537000e-01 1.175900e+00 3.158000e-01 1.157700e+00 4.833000e-01 1.690800e+00 5.060000e-02 1.869400e+00 1.534000e-01 -8.700000e-02 1.834000e+00 6.629000e-01 2.139000e-01
+ 1.053400e+00 6.240000e-02 9.920000e-01 7.462000e-01 6.545000e-01 9.606000e-01 9.666000e-01 7.742000e-01 7.771000e-01 7.086000e-01 9.877000e-01 7.512000e-01 6.243000e-01 3.112000e-01
+ 7.150000e-02 1.045000e+00 1.133000e-01 8.252000e-01 4.200000e-02 7.057000e-01 1.016000e-01 8.422000e-01 7.360000e-02 6.540000e-01 7.840000e-02 8.680000e-01 7.220000e-02 5.227000e-01
+ 1.002600e+00 1.198000e-01 6.942000e-01 1.048700e+00 5.669000e-01 5.787000e-01 7.905000e-01 9.402000e-01 6.875000e-01 3.929000e-01 7.361000e-01 1.000800e+00 5.116000e-01 2.856000e-01
+ 4.843000e-01 6.102000e-01 2.827000e-01 1.441700e+00 2.176000e-01 8.856000e-01 3.216000e-01 1.399100e+00 8.600000e-02 1.002300e+00 2.466000e-01 1.485100e+00 1.914000e-01 5.923000e-01
+ 3.972000e-01 1.078000e-01 5.178000e-01 2.948000e-01 5.923000e-01 3.344000e-01 5.037000e-01 3.121000e-01 6.118000e-01 3.270000e-01 5.425000e-01 2.633000e-01 3.099000e-01 1.574000e-01
+ 3.522000e-01 3.868000e-01 5.201000e-01 6.377000e-01 5.522000e-01 7.776000e-01 5.076000e-01 6.553000e-01 6.258000e-01 7.105000e-01 4.515000e-01 7.196000e-01 3.029000e-01 3.665000e-01
+ 2.230000e-01 8.879000e-01 1.593000e-01 7.129000e-01 1.089000e-01 5.914000e-01 1.294000e-01 7.501000e-01 1.590000e-01 5.180000e-01 1.132000e-01 7.677000e-01 1.232000e-01 4.428000e-01
+ 4.409000e-01 -1.820000e-02 6.190000e-01 5.930000e-02 5.689000e-01 2.286000e-01 4.717000e-01 2.311000e-01 5.974000e-01 2.056000e-01 4.849000e-01 2.192000e-01 3.067000e-01 9.330000e-02
+ 7.310000e-02 4.657000e-01 1.022000e-01 7.503000e-01 5.510000e-02 9.277000e-01 7.300000e-02 7.852000e-01 1.180000e-01 8.651000e-01 1.219000e-01 7.260000e-01 5.260000e-02 4.397000e-01
+ 6.670000e-02 5.461000e-01 4.370000e-01 4.789000e-01 5.523000e-01 4.830000e-01 1.668600e+00 7.700000e-02 1.280000e-02 1.978700e+00 3.340000e-02 -4.030000e-02 2.290000e-01 3.043000e-01
+ 8.427000e-01 2.270000e-01 1.094300e+00 3.023000e-01 1.294900e+00 2.022000e-01 1.886300e+00 -1.803000e-01 2.044700e+00 -4.780000e-02 2.900000e-03 1.732800e+00 6.900000e-01 1.222000e-01
+ 1.126600e+00 -2.560000e-02 5.215000e-01 1.210700e+00 4.669000e-01 6.685000e-01 5.571000e-01 1.170500e+00 4.676000e-01 6.308000e-01 4.948000e-01 1.244900e+00 3.906000e-01 4.083000e-01
+ 5.600000e-01 3.640000e-01 8.255000e-01 6.307000e-01 9.831000e-01 6.679000e-01 8.842000e-01 5.626000e-01 9.357000e-01 7.488000e-01 8.266000e-01 6.312000e-01 4.738000e-01 3.681000e-01
+ 4.810000e-02 1.007400e+00 4.296000e-01 9.304000e-01 4.478000e-01 1.047200e+00 1.756000e+00 -3.380000e-02 -7.480000e-02 2.089300e+00 -4.250000e-02 5.140000e-02 2.391000e-01 5.596000e-01
+ 1.095200e+00 1.350000e-02 8.862000e-01 8.475000e-01 7.144000e-01 3.892000e-01 9.740000e-01 7.529000e-01 7.114000e-01 3.571000e-01 9.248000e-01 8.077000e-01 5.859000e-01 1.979000e-01
+ 2.006000e-01 3.454000e-01 3.986000e-01 4.469000e-01 3.417000e-01 6.387000e-01 2.808000e-01 5.793000e-01 4.392000e-01 5.374000e-01 3.424000e-01 5.096000e-01 2.043000e-01 2.865000e-01
+-2.480000e-02 8.079000e-01 3.514000e-01 7.237000e-01 4.603000e-01 7.386000e-01 8.570000e-02 1.632300e+00 6.240000e-02 1.922700e+00 -2.600000e-02 3.120000e-02 1.639000e-01 4.770000e-01
+ 4.297000e-01 1.825000e-01 5.939000e-01 3.794000e-01 7.233000e-01 3.816000e-01 6.731000e-01 2.893000e-01 7.694000e-01 3.435000e-01 7.308000e-01 2.192000e-01 3.835000e-01 1.722000e-01
+ 6.357000e-01 4.547000e-01 2.637000e-01 7.217000e-01 2.175000e-01 5.563000e-01 2.751000e-01 7.127000e-01 1.261000e-01 6.451000e-01 2.460000e-01 7.426000e-01 2.248000e-01 3.850000e-01
+ 1.124800e+00 -2.590000e-02 4.916000e-01 1.229300e+00 3.932000e-01 9.462000e-01 5.271000e-01 1.185500e+00 4.211000e-01 8.591000e-01 5.142000e-01 1.206400e+00 3.246000e-01 5.502000e-01
+ 1.131100e+00 -2.980000e-02 1.013700e+00 7.144000e-01 8.036000e-01 2.317000e-01 1.076000e+00 6.498000e-01 7.927000e-01 2.099000e-01 9.650000e-01 7.713000e-01 6.183000e-01 1.358000e-01
+ 3.026000e-01 4.889000e-01 6.114000e-01 4.941000e-01 6.889000e-01 5.464000e-01 1.615600e+00 1.361000e-01 5.000000e-04 2.004300e+00 3.290000e-02 -3.970000e-02 3.152000e-01 3.408000e-01
+-1.580000e-02 4.553000e-01 1.158000e-01 6.442000e-01 3.486000e-01 5.106000e-01 -1.019000e-01 1.854600e+00 2.450000e-02 -3.050000e-02 6.260000e-02 -7.460000e-02 1.096000e-01 3.147000e-01
+-8.610000e-02 1.207600e+00 3.407000e-01 1.090100e+00 5.373000e-01 9.981000e-01 -3.530000e-02 1.776400e+00 -1.200000e-03 2.001700e+00 2.530000e-02 1.701800e+00 1.707000e-01 6.720000e-01
+ 3.425000e-01 1.699000e-01 6.039000e-01 1.853000e-01 5.608000e-01 3.606000e-01 5.748000e-01 2.185000e-01 6.062000e-01 3.205000e-01 4.801000e-01 3.313000e-01 3.026000e-01 1.613000e-01
+ 8.624000e-01 2.389000e-01 1.129600e+00 5.381000e-01 1.292500e+00 4.896000e-01 1.810500e+00 -9.300000e-02 2.041200e+00 -4.980000e-02 4.800000e-03 1.730400e+00 6.792000e-01 2.692000e-01
+-2.360000e-02 6.987000e-01 1.493000e-01 8.415000e-01 2.933000e-01 8.121000e-01 -3.410000e-02 1.772800e+00 -7.390000e-02 2.085800e+00 -5.190000e-02 6.100000e-02 1.021000e-01 4.729000e-01
+ 1.140600e+00 -3.760000e-02 5.592000e-01 1.172900e+00 3.989000e-01 8.188000e-01 6.176000e-01 1.108100e+00 4.037000e-01 7.717000e-01 5.580000e-01 1.172800e+00 4.104000e-01 4.151000e-01
+ 4.442000e-01 2.989000e-01 6.370000e-01 5.436000e-01 8.215000e-01 5.003000e-01 7.841000e-01 3.647000e-01 7.794000e-01 5.730000e-01 6.997000e-01 4.653000e-01 3.936000e-01 2.806000e-01
+ 7.410000e-02 3.967000e-01 4.133000e-01 3.645000e-01 4.413000e-01 4.767000e-01 -4.360000e-02 1.781800e+00 4.830000e-02 -5.760000e-02 1.240000e-02 -1.260000e-02 1.716000e-01 2.843000e-01
+ 3.583000e-01 7.460000e-01 1.489000e-01 6.681000e-01 1.327000e-01 5.254000e-01 1.705000e-01 6.434000e-01 1.733000e-01 4.643000e-01 1.570000e-01 6.552000e-01 1.646000e-01 3.708000e-01
+ 5.144000e-01 7.100000e-03 7.310000e-01 1.166000e-01 8.438000e-01 1.292000e-01 1.745400e+00 -2.040000e-02 5.240000e-02 -6.220000e-02 2.000000e-04 -1.200000e-03 4.207000e-01 6.780000e-02
+ 6.726000e-01 1.323000e-01 1.144800e+00 9.590000e-02 1.362000e+00 3.870000e-02 1.169700e+00 6.840000e-02 1.369600e+00 5.110000e-02 1.182100e+00 5.370000e-02 6.784000e-01 3.530000e-02
+ 1.674000e-01 3.897000e-01 4.111000e-01 4.741000e-01 6.153000e-01 3.688000e-01 1.697900e+00 3.140000e-02 -5.520000e-02 2.065200e+00 -4.320000e-02 5.020000e-02 2.435000e-01 2.632000e-01
+ 3.785000e-01 7.282000e-01 1.824000e-01 7.266000e-01 1.605000e-01 5.580000e-01 1.435000e-01 7.714000e-01 1.922000e-01 5.056000e-01 3.317000e-01 5.522000e-01 1.777000e-01 3.997000e-01
+ 3.145000e-01 7.818000e-01 1.241000e-01 5.976000e-01 1.230000e-01 4.677000e-01 1.220000e-01 6.060000e-01 1.649000e-01 4.058000e-01 7.690000e-02 6.546000e-01 1.322000e-01 3.602000e-01
+ 2.918000e-01 2.719000e-01 6.459000e-01 2.241000e-01 7.378000e-01 2.574000e-01 1.683400e+00 6.250000e-02 2.860000e-02 1.964400e+00 -5.600000e-03 6.500000e-03 3.631000e-01 1.400000e-01
+ 1.151300e+00 -5.120000e-02 6.183000e-01 3.049000e-01 6.627000e-01 3.790000e-02 7.937000e-01 9.420000e-02 5.698000e-01 1.317000e-01 8.385000e-01 4.390000e-02 4.815000e-01 9.850000e-02
+ 1.269000e-01 9.731000e-01 4.120000e-01 1.036700e+00 5.733000e-01 9.856000e-01 1.699400e+00 3.680000e-02 -2.470000e-02 2.024300e+00 -1.100000e-02 1.743800e+00 2.299000e-01 6.160000e-01
+-9.200000e-03 6.616000e-01 5.080000e-02 9.117000e-01 1.767000e-01 9.075000e-01 2.000000e-02 1.709800e+00 -6.340000e-02 2.076700e+00 -2.020000e-02 2.450000e-02 4.400000e-02 5.188000e-01
+ 3.096000e-01 3.670000e-01 4.894000e-01 5.677000e-01 5.287000e-01 6.841000e-01 4.190000e-01 6.544000e-01 5.844000e-01 6.375000e-01 5.973000e-01 4.401000e-01 2.783000e-01 3.331000e-01
+ 5.330000e-02 8.748000e-01 1.818000e-01 1.072900e+00 2.553000e-01 1.123700e+00 -2.450000e-02 1.760300e+00 -3.000000e-03 2.009000e+00 4.320000e-02 -5.350000e-02 1.072000e-01 6.315000e-01
+ 1.054700e+00 5.700000e-02 9.906000e-01 9.390000e-02 8.846000e-01 -8.220000e-02 1.024500e+00 4.920000e-02 7.418000e-01 6.930000e-02 1.035600e+00 3.830000e-02 6.174000e-01 2.600000e-02
+ 4.399000e-01 3.581000e-01 8.365000e-01 3.885000e-01 9.079000e-01 4.923000e-01 7.725000e-01 4.573000e-01 7.929000e-01 6.507000e-01 7.882000e-01 4.434000e-01 4.279000e-01 2.874000e-01
+ 4.167000e-01 5.007000e-01 6.382000e-01 7.986000e-01 8.046000e-01 8.237000e-01 7.404000e-01 6.831000e-01 7.403000e-01 9.233000e-01 6.321000e-01 8.118000e-01 3.938000e-01 4.348000e-01
+ 1.641000e-01 5.116000e-01 5.404000e-01 4.425000e-01 5.521000e-01 5.661000e-01 1.699300e+00 3.610000e-02 2.810000e-02 1.964300e+00 6.130000e-02 -7.280000e-02 2.508000e-01 3.306000e-01
+ 6.288000e-01 4.748000e-01 3.036000e-01 1.429400e+00 1.956000e-01 1.249500e+00 3.526000e-01 1.368400e+00 2.819000e-01 1.080300e+00 2.704000e-01 1.468300e+00 2.507000e-01 6.484000e-01
+ 2.369000e-01 6.611000e-01 5.288000e-01 6.834000e-01 6.557000e-01 6.726000e-01 1.709300e+00 2.570000e-02 -3.230000e-02 2.038300e+00 -7.010000e-02 8.680000e-02 2.784000e-01 4.395000e-01
+ 8.934000e-01 2.195000e-01 4.395000e-01 9.475000e-01 3.611000e-01 6.075000e-01 3.924000e-01 1.003200e+00 3.487000e-01 5.886000e-01 4.447000e-01 9.433000e-01 2.960000e-01 4.255000e-01
+ 3.127000e-01 7.936000e-01 2.430000e-01 1.480400e+00 1.331000e-01 1.843300e+00 1.919000e-01 1.538600e+00 8.090000e-02 1.620200e+00 2.307000e-01 1.493000e+00 1.468000e-01 8.283000e-01
+ 4.778000e-01 5.678000e-01 8.714000e-01 7.462000e-01 9.055000e-01 9.499000e-01 8.423000e-01 7.811000e-01 9.141000e-01 9.747000e-01 7.781000e-01 8.622000e-01 4.561000e-01 4.868000e-01
+ 8.896000e-01 2.207000e-01 2.578000e-01 1.349300e+00 2.169000e-01 8.216000e-01 4.123000e-01 1.165400e+00 3.797000e-01 5.948000e-01 4.365000e-01 1.129600e+00 2.961000e-01 4.484000e-01
+ 1.164000e-01 5.170000e-01 9.500000e-03 1.017200e+00 1.090000e-02 1.156900e+00 -2.770000e-02 1.060900e+00 -2.000000e-03 1.186900e+00 7.900000e-02 9.352000e-01 2.830000e-02 5.596000e-01
+ 4.084000e-01 6.935000e-01 7.813000e-01 7.456000e-01 8.580000e-01 7.981000e-01 1.763400e+00 -3.380000e-02 3.120000e-02 1.962900e+00 4.640000e-02 1.680600e+00 4.056000e-01 4.892000e-01
+ 4.115000e-01 3.150000e-01 6.251000e-01 5.168000e-01 6.864000e-01 6.216000e-01 6.157000e-01 5.283000e-01 5.501000e-01 8.035000e-01 5.848000e-01 5.660000e-01 3.420000e-01 3.209000e-01
+ 5.793000e-01 5.256000e-01 2.275000e-01 6.283000e-01 2.350000e-01 4.414000e-01 3.038000e-01 5.316000e-01 2.404000e-01 4.183000e-01 2.991000e-01 5.356000e-01 2.314000e-01 3.192000e-01
+ 1.988000e-01 4.499000e-01 2.277000e-01 8.006000e-01 2.805000e-01 8.852000e-01 2.238000e-01 8.069000e-01 2.915000e-01 8.895000e-01 2.605000e-01 7.604000e-01 1.524000e-01 4.373000e-01
+ 5.380000e-02 4.115000e-01 1.528000e-01 5.627000e-01 5.870000e-02 7.814000e-01 1.204000e-01 6.025000e-01 2.900000e-03 8.575000e-01 8.210000e-02 6.511000e-01 4.320000e-02 3.794000e-01
+ 5.143000e-01 4.670000e-01 6.954000e-01 6.313000e-01 8.395000e-01 5.981000e-01 1.833600e+00 -1.195000e-01 6.730000e-02 1.927100e+00 4.320000e-02 -5.020000e-02 4.050000e-01 3.727000e-01
+ 1.093200e+00 1.700000e-02 9.920000e-01 2.131000e-01 8.428000e-01 4.310000e-02 1.037400e+00 1.593000e-01 8.353000e-01 2.400000e-02 1.127100e+00 5.930000e-02 6.564000e-01 1.860000e-02
+ 1.529000e-01 7.166000e-01 4.760000e-01 7.066000e-01 6.900000e-01 5.941000e-01 1.790100e+00 -6.710000e-02 8.600000e-03 1.987800e+00 -4.000000e-03 4.600000e-03 3.005000e-01 3.884000e-01
+ 6.414000e-01 1.886000e-01 9.415000e-01 2.036000e-01 9.966000e-01 2.832000e-01 1.774400e+00 -5.000000e-02 2.061900e+00 -7.050000e-02 2.750000e-02 -3.310000e-02 5.474000e-01 1.283000e-01
+ 1.100400e+00 1.080000e-02 1.541000e+00 1.791000e-01 9.895000e-01 8.366000e-01 1.446100e+00 2.887000e-01 9.364000e-01 7.076000e-01 1.512700e+00 2.114000e-01 7.341000e-01 2.329000e-01
+ 1.063300e+00 4.970000e-02 6.328000e-01 1.103200e+00 4.612000e-01 1.008900e+00 7.123000e-01 1.013200e+00 5.231000e-01 8.612000e-01 7.706000e-01 9.377000e-01 4.556000e-01 4.502000e-01
+ 9.390000e-02 2.852000e-01 1.502000e-01 5.744000e-01 3.306000e-01 4.972000e-01 2.500000e-02 1.699300e+00 2.940000e-02 -3.570000e-02 -5.610000e-02 6.750000e-02 1.023000e-01 3.008000e-01
+ 6.954000e-01 2.670000e-01 1.011800e+00 2.593000e-01 1.137400e+00 2.561000e-01 1.706800e+00 3.340000e-02 2.083900e+00 -9.270000e-02 1.300000e-02 -1.780000e-02 6.024000e-01 1.478000e-01
+ 1.190800e+00 -9.960000e-02 3.961000e-01 1.285800e+00 3.905000e-01 6.359000e-01 4.426000e-01 1.233500e+00 3.446000e-01 6.614000e-01 4.912000e-01 1.180000e+00 3.361000e-01 4.130000e-01
+ 5.374000e-01 3.805000e-01 8.830000e-01 5.444000e-01 1.049100e+00 5.771000e-01 9.036000e-01 5.234000e-01 1.049500e+00 5.964000e-01 9.324000e-01 4.881000e-01 5.132000e-01 3.125000e-01
+ 1.039100e+00 7.860000e-02 1.266200e+00 4.742000e-01 9.161000e-01 1.626000e-01 1.197900e+00 5.470000e-01 9.137000e-01 1.269000e-01 1.213700e+00 5.333000e-01 6.993000e-01 7.170000e-02
+ 6.458000e-01 4.649000e-01 2.589000e-01 9.217000e-01 3.329000e-01 5.323000e-01 2.311000e-01 9.548000e-01 1.899000e-01 6.802000e-01 3.269000e-01 8.420000e-01 2.573000e-01 4.138000e-01
+ 1.052100e+00 6.650000e-02 7.528000e-01 9.807000e-01 6.024000e-01 8.703000e-01 7.803000e-01 9.460000e-01 7.222000e-01 6.507000e-01 7.261000e-01 1.012000e+00 5.141000e-01 3.938000e-01
+ 5.840000e-02 5.279000e-01 8.240000e-02 8.389000e-01 8.580000e-02 9.652000e-01 7.990000e-02 8.364000e-01 1.640000e-02 1.055800e+00 1.320000e-01 7.804000e-01 6.160000e-02 4.665000e-01
+ 4.050000e-02 9.263000e-01 3.432000e-01 9.408000e-01 4.826000e-01 9.161000e-01 -1.620000e-02 1.747900e+00 -1.530000e-02 2.022200e+00 6.260000e-02 -7.430000e-02 1.872000e-01 5.699000e-01
+ 5.268000e-01 3.336000e-01 6.997000e-01 4.976000e-01 9.724000e-01 3.158000e-01 1.717100e+00 1.850000e-02 1.060000e-02 1.983800e+00 -2.130000e-02 2.460000e-02 4.486000e-01 2.498000e-01
+ 1.870000e-02 6.936000e-01 1.526000e-01 8.764000e-01 2.685000e-01 8.772000e-01 -2.400000e-02 1.761100e+00 5.660000e-02 1.933300e+00 3.520000e-02 -4.000000e-02 7.850000e-02 5.252000e-01
+ 1.103300e+00 3.900000e-03 8.285000e-01 3.832000e-01 6.649000e-01 2.246000e-01 6.963000e-01 5.402000e-01 5.935000e-01 2.886000e-01 7.667000e-01 4.494000e-01 5.473000e-01 1.339000e-01
+ 8.410000e-02 1.032800e+00 4.392000e-01 1.048400e+00 6.082000e-01 9.935000e-01 1.738900e+00 -3.800000e-03 -2.100000e-02 2.026900e+00 -1.067000e-01 1.858800e+00 2.398000e-01 6.295000e-01
+ 4.689000e-01 4.092000e-01 6.791000e-01 7.029000e-01 8.472000e-01 7.169000e-01 6.373000e-01 7.540000e-01 7.336000e-01 8.768000e-01 7.611000e-01 6.114000e-01 3.997000e-01 3.986000e-01
+ 5.368000e-01 2.121000e-01 7.638000e-01 3.146000e-01 8.883000e-01 3.072000e-01 1.674800e+00 6.920000e-02 9.300000e-03 1.989600e+00 6.700000e-03 -9.100000e-03 4.426000e-01 1.885000e-01
+ 4.240000e-01 4.900000e-01 7.232000e-01 5.014000e-01 8.591000e-01 4.838000e-01 1.772600e+00 -4.870000e-02 4.240000e-02 1.954800e+00 -1.780000e-02 2.100000e-02 4.348000e-01 2.854000e-01
+ 1.050000e-01 1.003300e+00 2.230000e-02 5.880000e-01 3.430000e-02 4.697000e-01 1.249000e-01 4.665000e-01 6.710000e-02 4.225000e-01 1.030000e-01 4.903000e-01 6.130000e-02 3.688000e-01
+ 3.111000e-01 3.751000e-01 4.651000e-01 6.121000e-01 6.362000e-01 5.681000e-01 5.020000e-01 5.630000e-01 6.580000e-01 5.583000e-01 5.059000e-01 5.646000e-01 3.052000e-01 3.081000e-01
+ 4.478000e-01 -1.400000e-03 6.501000e-01 6.110000e-02 7.771000e-01 2.590000e-02 6.717000e-01 3.060000e-02 7.627000e-01 5.250000e-02 6.493000e-01 5.720000e-02 3.869000e-01 2.020000e-02
+ 7.303000e-01 3.720000e-01 3.490000e-01 6.100000e-01 2.788000e-01 4.756000e-01 2.771000e-01 6.963000e-01 3.090000e-01 4.241000e-01 2.837000e-01 6.863000e-01 2.655000e-01 3.350000e-01
+ 5.140000e-02 1.062500e+00 3.940000e-02 4.118000e-01 -9.000000e-03 4.001000e-01 1.314000e-01 3.037000e-01 7.130000e-02 2.978000e-01 7.100000e-02 3.778000e-01 4.670000e-02 2.925000e-01
+ 4.980000e-01 5.100000e-01 8.111000e-01 5.115000e-01 8.700000e-01 5.826000e-01 1.682700e+00 5.570000e-02 7.700000e-03 1.987000e+00 3.500000e-02 -4.180000e-02 4.798000e-01 2.986000e-01
+ 2.034000e-01 6.543000e-01 2.535000e-01 1.109500e+00 3.152000e-01 1.231200e+00 2.384000e-01 1.129500e+00 3.808000e-01 1.179800e+00 2.254000e-01 1.140700e+00 1.744000e-01 6.073000e-01
+ 3.153000e-01 4.310000e-01 6.277000e-01 5.140000e-01 6.222000e-01 6.974000e-01 6.016000e-01 5.469000e-01 7.290000e-01 5.904000e-01 5.691000e-01 5.835000e-01 3.592000e-01 3.007000e-01
+ 8.961000e-01 2.310000e-02 1.048800e+00 2.182000e-01 1.305600e+00 6.030000e-02 1.701700e+00 3.300000e-02 2.162900e+00 -1.954000e-01 -5.500000e-03 6.700000e-03 6.851000e-01 5.080000e-02
+ 1.099300e+00 6.800000e-03 4.745000e-01 1.250200e+00 3.631000e-01 8.284000e-01 4.291000e-01 1.304800e+00 3.799000e-01 7.691000e-01 5.049000e-01 1.217200e+00 3.288000e-01 4.910000e-01
+ 1.124000e-01 4.549000e-01 1.585000e-01 7.363000e-01 7.680000e-02 9.628000e-01 1.012000e-01 8.069000e-01 9.650000e-02 9.527000e-01 1.245000e-01 7.763000e-01 5.500000e-02 4.688000e-01
+ 2.914000e-01 8.203000e-01 1.384000e-01 9.877000e-01 1.531000e-01 6.968000e-01 2.093000e-01 9.102000e-01 2.100000e-01 6.084000e-01 1.684000e-01 9.538000e-01 1.537000e-01 5.024000e-01
+ 2.503000e-01 5.377000e-01 4.508000e-01 7.689000e-01 6.041000e-01 7.665000e-01 4.468000e-01 7.721000e-01 6.211000e-01 7.645000e-01 4.442000e-01 7.789000e-01 2.925000e-01 4.055000e-01
+ 5.522000e-01 9.600000e-02 8.623000e-01 9.830000e-02 9.783000e-01 1.028000e-01 1.732200e+00 1.400000e-03 6.040000e-02 1.928700e+00 -1.110000e-02 1.420000e-02 4.638000e-01 1.047000e-01
+ 6.990000e-01 1.695000e-01 1.112800e+00 2.379000e-01 1.287200e+00 2.463000e-01 1.054400e+00 3.047000e-01 1.330800e+00 2.267000e-01 1.103400e+00 2.486000e-01 6.255000e-01 1.588000e-01
+ 1.206000e-01 5.177000e-01 2.957000e-01 6.815000e-01 2.742000e-01 8.485000e-01 2.064000e-01 7.865000e-01 3.213000e-01 8.104000e-01 2.374000e-01 7.500000e-01 1.375000e-01 4.325000e-01
+ 7.560000e-02 4.799000e-01 1.069000e-01 7.654000e-01 2.251000e-01 7.443000e-01 2.162000e-01 6.340000e-01 1.732000e-01 8.273000e-01 1.715000e-01 6.874000e-01 9.100000e-02 4.062000e-01
+ 2.584000e-01 8.386000e-01 7.620000e-02 7.278000e-01 1.070000e-01 5.378000e-01 7.110000e-02 7.384000e-01 7.870000e-02 5.573000e-01 4.850000e-02 7.629000e-01 1.041000e-01 4.274000e-01
+ 7.270000e-01 3.828000e-01 1.070200e+00 4.521000e-01 1.323100e+00 3.022000e-01 1.663400e+00 8.400000e-02 2.080600e+00 -9.090000e-02 7.430000e-02 1.642100e+00 6.176000e-01 2.725000e-01
+ 2.311000e-01 8.728000e-01 1.756000e-01 5.309000e-01 1.360000e-01 4.464000e-01 1.481000e-01 5.650000e-01 6.090000e-02 5.215000e-01 1.231000e-01 5.944000e-01 1.064000e-01 3.859000e-01
+ 6.257000e-01 4.252000e-01 8.745000e-01 5.017000e-01 8.145000e-01 7.101000e-01 1.767800e+00 -4.060000e-02 3.790000e-02 1.954800e+00 -3.610000e-02 1.777500e+00 4.962000e-01 3.155000e-01
+ 3.711000e-01 3.448000e-01 6.690000e-01 3.564000e-01 7.493000e-01 4.080000e-01 1.665500e+00 8.190000e-02 4.510000e-02 1.949600e+00 1.120000e-02 -1.470000e-02 3.586000e-01 2.472000e-01
+ 1.091400e+00 1.500000e-02 7.516000e-01 9.932000e-01 6.739000e-01 4.224000e-01 9.437000e-01 7.683000e-01 6.294000e-01 4.399000e-01 8.063000e-01 9.298000e-01 5.482000e-01 2.326000e-01
+ 4.256000e-01 2.780000e-02 6.816000e-01 2.220000e-02 7.522000e-01 5.630000e-02 6.717000e-01 3.390000e-02 8.997000e-01 -1.075000e-01 6.933000e-01 9.500000e-03 3.776000e-01 3.240000e-02
+ 1.028000e+00 9.510000e-02 5.619000e-01 4.529000e-01 5.648000e-01 2.058000e-01 6.508000e-01 3.510000e-01 4.077000e-01 3.744000e-01 5.686000e-01 4.476000e-01 4.042000e-01 2.186000e-01
+-4.200000e-02 7.610000e-01 1.212000e-01 9.064000e-01 1.753000e-01 9.878000e-01 5.340000e-02 1.670300e+00 -1.660000e-02 2.013600e+00 6.680000e-02 -7.970000e-02 7.740000e-02 5.239000e-01
+ 3.907000e-01 5.587000e-01 6.918000e-01 5.787000e-01 7.508000e-01 6.533000e-01 1.765900e+00 -3.900000e-02 -4.400000e-03 2.007300e+00 1.150000e-02 -1.440000e-02 3.759000e-01 3.750000e-01
+ 5.883000e-01 4.444000e-01 9.442000e-01 6.742000e-01 1.223800e+00 5.907000e-01 1.045900e+00 5.541000e-01 1.129100e+00 7.246000e-01 1.068700e+00 5.271000e-01 5.945000e-01 3.288000e-01
+ 1.261000e-01 4.338000e-01 2.383000e-01 6.331000e-01 1.393000e-01 8.796000e-01 7.800000e-02 8.233000e-01 1.462000e-01 8.875000e-01 1.771000e-01 7.083000e-01 8.940000e-02 4.235000e-01
+ 2.951000e-01 7.971000e-01 2.091000e-01 7.775000e-01 8.550000e-02 6.993000e-01 1.987000e-01 7.923000e-01 5.940000e-02 7.144000e-01 1.031000e-01 9.043000e-01 1.108000e-01 5.057000e-01
+ 1.100700e+00 5.800000e-03 5.735000e-01 5.508000e-01 4.843000e-01 3.643000e-01 5.151000e-01 6.179000e-01 4.244000e-01 4.098000e-01 6.194000e-01 4.928000e-01 4.094000e-01 2.485000e-01
+ 8.864000e-01 1.600000e-03 1.375200e+00 2.020000e-02 1.571500e+00 1.050000e-02 1.415500e+00 -3.190000e-02 1.655200e+00 -6.190000e-02 1.409500e+00 -2.720000e-02 7.862000e-01 1.790000e-02
+ 1.152100e+00 -5.580000e-02 7.192000e-01 1.030300e+00 7.694000e-01 4.801000e-01 8.318000e-01 8.979000e-01 6.653000e-01 5.582000e-01 9.178000e-01 7.947000e-01 5.415000e-01 3.093000e-01
+ 5.044000e-01 1.540000e-02 8.126000e-01 -2.700000e-03 8.906000e-01 4.030000e-02 7.963000e-01 1.780000e-02 9.854000e-01 -5.490000e-02 7.248000e-01 1.000000e-01 4.532000e-01 1.820000e-02
+-2.180000e-02 8.299000e-01 1.599000e-01 9.649000e-01 3.780000e-01 8.483000e-01 2.180000e-02 1.707200e+00 -4.570000e-02 2.054800e+00 -1.020000e-02 1.240000e-02 1.121000e-01 5.458000e-01
+ 9.160000e-02 1.036900e+00 1.755000e-01 6.179000e-01 8.330000e-02 5.723000e-01 1.340000e-01 6.709000e-01 5.480000e-02 5.914000e-01 6.290000e-02 7.533000e-01 1.001000e-01 4.355000e-01
+ 3.507000e-01 4.682000e-01 6.535000e-01 4.860000e-01 7.487000e-01 5.119000e-01 1.678900e+00 5.970000e-02 -5.820000e-02 2.068200e+00 -2.410000e-02 2.870000e-02 3.420000e-01 3.332000e-01
+ 1.034700e+00 8.190000e-02 5.016000e-01 1.232800e+00 4.562000e-01 1.218400e+00 4.686000e-01 1.269500e+00 4.205000e-01 1.132200e+00 5.104000e-01 1.223000e+00 3.742000e-01 5.762000e-01
+ 6.282000e-01 1.157000e-01 8.571000e-01 3.314000e-01 1.049100e+00 2.918000e-01 8.451000e-01 3.437000e-01 1.130400e+00 2.179000e-01 9.457000e-01 2.238000e-01 5.257000e-01 1.544000e-01
+ 1.087900e+00 1.780000e-02 5.849000e-01 9.080000e-02 4.400000e-01 1.295000e-01 5.795000e-01 1.014000e-01 4.978000e-01 4.730000e-02 6.493000e-01 2.220000e-02 4.243000e-01 4.870000e-02
+ 1.043200e+00 7.570000e-02 3.362000e-01 8.387000e-01 3.922000e-01 4.720000e-01 4.663000e-01 6.843000e-01 3.137000e-01 5.435000e-01 4.333000e-01 7.240000e-01 3.211000e-01 3.463000e-01
+ 1.763000e-01 5.283000e-01 4.547000e-01 5.701000e-01 6.420000e-01 4.928000e-01 1.767600e+00 -4.320000e-02 3.570000e-02 1.956900e+00 -3.270000e-02 3.990000e-02 2.514000e-01 3.485000e-01
+ 4.510000e-01 6.430000e-02 6.271000e-01 2.025000e-01 7.699000e-01 1.555000e-01 7.216000e-01 8.540000e-02 8.444000e-01 8.560000e-02 8.332000e-01 -4.750000e-02 4.062000e-01 6.300000e-02
+ 7.900000e-02 5.267000e-01 8.690000e-02 8.697000e-01 1.671000e-01 9.115000e-01 5.880000e-02 9.035000e-01 6.870000e-02 1.042700e+00 8.080000e-02 8.789000e-01 6.730000e-02 4.822000e-01
+ 2.188000e-01 3.369000e-01 3.669000e-01 5.010000e-01 3.112000e-01 6.960000e-01 2.375000e-01 6.537000e-01 3.345000e-01 6.777000e-01 2.797000e-01 6.031000e-01 1.660000e-01 3.429000e-01
+-9.450000e-02 1.214000e+00 -7.660000e-02 1.790700e+00 1.769000e-01 1.628300e+00 -1.730000e-02 1.752100e+00 2.320000e-02 1.976100e+00 -8.350000e-02 1.829900e+00 2.030000e-02 9.413000e-01
+ 1.486000e-01 6.692000e-01 1.849000e-01 1.112100e+00 2.417000e-01 1.229800e+00 3.131000e-01 9.606000e-01 3.608000e-01 1.105200e+00 1.707000e-01 1.123700e+00 1.341000e-01 6.086000e-01
+ 1.151900e+00 -5.420000e-02 6.338000e-01 3.940000e-01 4.542000e-01 3.527000e-01 5.905000e-01 4.522000e-01 5.814000e-01 1.835000e-01 6.035000e-01 4.320000e-01 4.249000e-01 2.042000e-01
+ 1.479000e-01 2.251000e-01 1.986000e-01 3.961000e-01 2.674000e-01 3.986000e-01 1.340000e-01 4.690000e-01 2.304000e-01 4.534000e-01 1.999000e-01 3.911000e-01 1.029000e-01 2.407000e-01
+ 5.890000e-02 1.864000e-01 3.178000e-01 2.535000e-01 4.522000e-01 2.345000e-01 9.190000e-02 1.622500e+00 -7.700000e-03 9.000000e-03 -7.670000e-02 9.070000e-02 1.755000e-01 1.388000e-01
+ 9.400000e-02 6.238000e-01 3.969000e-01 6.334000e-01 4.998000e-01 6.599000e-01 1.790100e+00 -6.620000e-02 -7.930000e-02 2.093800e+00 2.700000e-03 -4.400000e-03 2.079000e-01 3.997000e-01
+ 2.319000e-01 5.927000e-01 3.022000e-01 1.001700e+00 3.991000e-01 1.072700e+00 4.055000e-01 8.795000e-01 4.504000e-01 1.036000e+00 3.112000e-01 9.839000e-01 1.925000e-01 5.553000e-01
+ 3.815000e-01 3.332000e-01 6.462000e-01 3.913000e-01 7.058000e-01 4.557000e-01 1.709500e+00 2.520000e-02 -6.140000e-02 2.076500e+00 2.690000e-02 -3.110000e-02 3.664000e-01 2.403000e-01
+ 1.078300e+00 3.410000e-02 8.201000e-01 9.103000e-01 6.938000e-01 1.278200e+00 7.104000e-01 1.039200e+00 5.932000e-01 1.278000e+00 7.361000e-01 1.003500e+00 5.368000e-01 4.586000e-01
+ 9.140000e-02 5.724000e-01 3.504000e-01 6.380000e-01 4.148000e-01 7.028000e-01 1.752300e+00 -2.260000e-02 -2.290000e-02 2.024300e+00 -1.770000e-02 1.910000e-02 2.003000e-01 3.734000e-01
+ 4.475000e-01 6.681000e-01 3.219000e-01 5.937000e-01 2.144000e-01 5.219000e-01 2.775000e-01 6.470000e-01 1.795000e-01 5.440000e-01 2.650000e-01 6.645000e-01 2.055000e-01 3.837000e-01
+ 1.052400e+00 6.480000e-02 5.225000e-01 8.387000e-01 4.788000e-01 4.712000e-01 5.038000e-01 8.560000e-01 3.832000e-01 5.550000e-01 5.543000e-01 7.987000e-01 3.811000e-01 3.338000e-01
+ 9.849000e-01 1.433000e-01 5.062000e-01 2.203000e-01 5.441000e-01 2.260000e-02 5.696000e-01 1.404000e-01 4.719000e-01 9.920000e-02 4.997000e-01 2.260000e-01 3.843000e-01 1.097000e-01
+ 5.748000e-01 1.579000e-01 9.100000e-01 2.326000e-01 9.980000e-01 3.141000e-01 9.007000e-01 2.419000e-01 9.729000e-01 3.628000e-01 8.942000e-01 2.515000e-01 5.003000e-01 1.643000e-01
+ 4.395000e-01 4.679000e-01 8.962000e-01 2.943000e-01 9.701000e-01 3.516000e-01 1.817900e+00 -9.650000e-02 2.730000e-02 1.960900e+00 2.100000e-03 -1.700000e-03 4.865000e-01 2.228000e-01
+ 4.840000e-01 2.094000e-01 7.491000e-01 3.479000e-01 9.198000e-01 3.134000e-01 7.565000e-01 3.362000e-01 7.934000e-01 4.768000e-01 6.991000e-01 4.020000e-01 4.212000e-01 2.110000e-01
+ 4.958000e-01 1.443000e-01 8.970000e-01 8.420000e-02 8.679000e-01 2.843000e-01 7.258000e-01 2.898000e-01 8.606000e-01 3.076000e-01 7.934000e-01 2.101000e-01 4.459000e-01 1.359000e-01
+ 1.079100e+00 3.290000e-02 5.166000e-01 7.448000e-01 4.626000e-01 4.487000e-01 5.806000e-01 6.720000e-01 4.977000e-01 3.901000e-01 5.378000e-01 7.246000e-01 4.012000e-01 2.923000e-01
+ 5.550000e-02 7.274000e-01 3.617000e-01 7.326000e-01 5.602000e-01 6.382000e-01 1.931200e+00 -2.333000e-01 7.000000e-03 1.992700e+00 -7.110000e-02 8.280000e-02 2.292000e-01 4.078000e-01
+ 1.089100e+00 2.140000e-02 4.152000e-01 8.053000e-01 3.788000e-01 5.185000e-01 4.597000e-01 7.495000e-01 3.572000e-01 5.192000e-01 4.332000e-01 7.823000e-01 3.524000e-01 3.287000e-01
+ 5.581000e-01 5.780000e-02 7.462000e-01 2.461000e-01 8.369000e-01 2.983000e-01 8.010000e-01 1.832000e-01 9.550000e-01 1.747000e-01 8.798000e-01 8.960000e-02 4.791000e-01 8.590000e-02
+ 1.096500e+00 3.900000e-03 4.117000e-01 4.921000e-01 3.664000e-01 3.479000e-01 4.433000e-01 4.572000e-01 3.868000e-01 3.088000e-01 4.363000e-01 4.653000e-01 3.154000e-01 2.638000e-01
+ 1.103600e+00 -9.000000e-04 5.596000e-01 1.163800e+00 4.281000e-01 9.927000e-01 5.554000e-01 1.170500e+00 4.449000e-01 9.107000e-01 5.766000e-01 1.139700e+00 3.750000e-01 5.223000e-01
+ 4.741000e-01 2.965000e-01 7.502000e-01 4.499000e-01 7.687000e-01 6.150000e-01 6.862000e-01 5.282000e-01 8.357000e-01 5.573000e-01 6.292000e-01 5.912000e-01 4.165000e-01 2.794000e-01
+ 3.807000e-01 4.936000e-01 5.540000e-01 8.272000e-01 8.500000e-01 6.754000e-01 6.042000e-01 7.640000e-01 7.195000e-01 8.542000e-01 6.354000e-01 7.275000e-01 3.726000e-01 4.133000e-01
+ 8.364000e-01 3.270000e-02 1.186400e+00 1.968000e-01 1.436000e+00 1.226000e-01 1.236400e+00 1.442000e-01 1.383800e+00 2.110000e-01 1.171300e+00 2.177000e-01 7.280000e-01 6.490000e-02
+-1.560000e-02 4.196000e-01 4.650000e-02 5.764000e-01 6.000000e-02 6.460000e-01 3.050000e-02 5.935000e-01 4.570000e-02 6.741000e-01 -3.280000e-02 6.699000e-01 1.860000e-02 3.420000e-01
+ 5.736000e-01 5.383000e-01 2.750000e-01 1.314900e+00 2.674000e-01 7.521000e-01 3.316000e-01 1.253500e+00 2.474000e-01 7.465000e-01 2.832000e-01 1.306000e+00 2.464000e-01 4.982000e-01
+ 7.007000e-01 3.733000e-01 9.031000e-01 7.899000e-01 1.038900e+00 7.671000e-01 1.784000e+00 -5.930000e-02 2.500000e-02 1.970800e+00 2.790000e-02 1.699700e+00 4.946000e-01 4.653000e-01
+ 1.101300e+00 3.700000e-03 4.778000e-01 5.740000e-02 3.817000e-01 6.810000e-02 3.378000e-01 2.228000e-01 3.602000e-01 8.800000e-02 3.097000e-01 2.582000e-01 3.379000e-01 5.670000e-02
+-5.860000e-02 7.000000e-02 2.469000e-01 5.640000e-02 2.655000e-01 1.748000e-01 -4.000000e-03 5.900000e-03 2.730000e-02 -3.150000e-02 5.900000e-03 -7.100000e-03 1.159000e-01 4.810000e-02
+ 1.120600e+00 -1.640000e-02 5.178000e-01 1.212100e+00 3.926000e-01 1.017600e+00 5.052000e-01 1.224700e+00 3.007000e-01 1.063800e+00 4.734000e-01 1.256600e+00 3.547000e-01 5.385000e-01
+ 2.983000e-01 8.036000e-01 2.176000e-01 9.156000e-01 1.722000e-01 6.839000e-01 1.845000e-01 9.522000e-01 1.204000e-01 7.224000e-01 2.503000e-01 8.755000e-01 1.339000e-01 5.293000e-01
+ 1.063100e+00 5.430000e-02 8.058000e-01 9.270000e-01 5.826000e-01 9.213000e-01 7.334000e-01 1.003300e+00 6.819000e-01 7.321000e-01 7.287000e-01 1.006000e+00 5.168000e-01 4.008000e-01
+ 4.254000e-01 3.643000e-01 7.034000e-01 5.253000e-01 6.943000e-01 7.253000e-01 8.134000e-01 4.001000e-01 7.932000e-01 6.344000e-01 7.578000e-01 4.625000e-01 4.057000e-01 3.053000e-01
+ 4.290000e-02 7.170000e-01 2.490000e-01 8.308000e-01 4.208000e-01 7.661000e-01 -2.320000e-02 1.762900e+00 1.860000e-02 1.978800e+00 -6.870000e-02 8.130000e-02 1.143000e-01 5.234000e-01
+-3.980000e-02 6.514000e-01 1.588000e-01 7.603000e-01 2.672000e-01 7.724000e-01 -1.780000e-02 1.750400e+00 1.890000e-02 1.978200e+00 -5.620000e-02 6.840000e-02 9.140000e-02 4.407000e-01
+ 2.062000e-01 4.098000e-01 2.551000e-01 7.148000e-01 4.466000e-01 6.359000e-01 2.715000e-01 6.969000e-01 2.490000e-01 8.829000e-01 2.201000e-01 7.591000e-01 1.590000e-01 4.010000e-01
+ 3.410000e-01 4.307000e-01 7.206000e-01 3.524000e-01 7.949000e-01 4.069000e-01 1.602900e+00 1.539000e-01 -1.459000e-01 2.171600e+00 1.000000e-02 -1.250000e-02 3.856000e-01 2.496000e-01
+-2.410000e-02 6.119000e-01 2.913000e-01 5.930000e-01 2.959000e-01 7.341000e-01 6.990000e-02 1.644200e+00 -4.070000e-02 2.050400e+00 9.500000e-02 -1.145000e-01 1.324000e-01 3.856000e-01
+ 8.864000e-01 1.457000e-01 1.449500e+00 1.612000e-01 1.558100e+00 2.872000e-01 1.467800e+00 1.383000e-01 1.645400e+00 2.126000e-01 1.511700e+00 8.060000e-02 8.334000e-01 9.520000e-02
+ 4.516000e-01 3.031000e-01 6.539000e-01 4.383000e-01 8.696000e-01 3.244000e-01 1.766400e+00 -3.990000e-02 -4.020000e-02 2.044000e+00 5.800000e-02 -6.990000e-02 3.998000e-01 2.355000e-01
+ 1.500000e-03 1.039600e+00 -6.270000e-02 1.707800e+00 -1.400000e-02 1.880800e+00 6.470000e-02 1.564000e+00 1.572000e-01 1.698000e+00 1.540000e-02 1.620800e+00 1.990000e-02 9.212000e-01
+ 4.896000e-01 2.424000e-01 8.585000e-01 1.687000e-01 1.021900e+00 1.227000e-01 1.728400e+00 -1.600000e-03 3.230000e-02 1.961700e+00 -3.790000e-02 4.570000e-02 4.665000e-01 1.439000e-01
+ 1.028500e+00 2.200000e-03 1.519100e+00 1.153000e-01 1.891400e+00 -6.680000e-02 1.574600e+00 4.710000e-02 1.882900e+00 -2.460000e-02 1.660200e+00 -5.800000e-02 9.215000e-01 1.180000e-02
+ 5.700000e-01 1.949000e-01 6.662000e-01 5.775000e-01 7.901000e-01 6.218000e-01 7.056000e-01 5.306000e-01 7.961000e-01 6.351000e-01 7.592000e-01 4.700000e-01 4.177000e-01 2.949000e-01
+ 2.964000e-01 2.839000e-01 3.532000e-01 5.773000e-01 4.329000e-01 6.187000e-01 3.322000e-01 6.025000e-01 4.204000e-01 6.566000e-01 3.175000e-01 6.181000e-01 2.053000e-01 3.326000e-01
+ 1.110100e+00 -6.900000e-03 5.430000e-01 7.135000e-01 3.813000e-01 5.501000e-01 4.775000e-01 7.901000e-01 4.455000e-01 4.477000e-01 5.519000e-01 7.022000e-01 3.929000e-01 3.006000e-01
+ 4.494000e-01 3.016000e-01 6.661000e-01 5.118000e-01 8.272000e-01 5.039000e-01 7.380000e-01 4.324000e-01 8.066000e-01 5.531000e-01 7.674000e-01 3.959000e-01 4.210000e-01 2.538000e-01
+ 1.201700e+00 -1.156000e-01 3.379000e-01 6.891000e-01 3.519000e-01 4.317000e-01 4.171000e-01 5.947000e-01 2.714000e-01 5.115000e-01 3.865000e-01 6.303000e-01 3.125000e-01 3.083000e-01
+ 4.175000e-01 6.920000e-01 7.238000e-01 8.324000e-01 8.213000e-01 8.528000e-01 1.684100e+00 5.680000e-02 9.100000e-03 1.987300e+00 -1.150000e-02 1.749900e+00 4.226000e-01 4.766000e-01
+ 4.360000e-01 3.713000e-01 6.214000e-01 6.574000e-01 7.203000e-01 7.235000e-01 5.825000e-01 6.956000e-01 7.206000e-01 7.458000e-01 6.731000e-01 5.917000e-01 3.731000e-01 3.595000e-01
+ 3.848000e-01 2.801000e-01 5.886000e-01 4.593000e-01 7.417000e-01 4.387000e-01 5.876000e-01 4.592000e-01 7.156000e-01 4.903000e-01 6.483000e-01 3.882000e-01 3.588000e-01 2.423000e-01
+ 1.096400e+00 1.260000e-02 8.966000e-01 3.421000e-01 6.822000e-01 2.301000e-01 8.664000e-01 3.788000e-01 6.956000e-01 1.860000e-01 8.982000e-01 3.447000e-01 5.707000e-01 1.193000e-01
+ 4.805000e-01 5.688000e-01 5.959000e-01 1.079200e+00 6.938000e-01 1.213100e+00 6.770000e-01 9.835000e-01 7.859000e-01 1.129700e+00 7.537000e-01 8.900000e-01 3.702000e-01 5.925000e-01
+ 1.090700e+00 1.510000e-02 4.532000e-01 8.947000e-01 3.829000e-01 5.780000e-01 4.569000e-01 8.886000e-01 3.970000e-01 5.348000e-01 5.703000e-01 7.581000e-01 3.893000e-01 3.213000e-01
+ 4.580000e-02 5.139000e-01 -1.840000e-02 3.196000e-01 -3.710000e-02 3.004000e-01 9.080000e-02 1.905000e-01 -1.390000e-02 2.675000e-01 1.530000e-02 2.785000e-01 9.600000e-03 2.236000e-01
+ 1.131200e+00 -3.020000e-02 8.929000e-01 7.040000e-02 7.703000e-01 -2.560000e-02 8.951000e-01 6.580000e-02 6.570000e-01 8.810000e-02 8.834000e-01 8.170000e-02 5.877000e-01 1.170000e-02
+-5.820000e-02 8.150000e-01 3.495000e-01 7.002000e-01 4.372000e-01 7.411000e-01 -8.800000e-02 1.829700e+00 6.840000e-02 1.920900e+00 -3.980000e-02 4.890000e-02 1.860000e-01 4.308000e-01
+ 3.479000e-01 6.417000e-01 6.093000e-01 7.009000e-01 7.565000e-01 6.621000e-01 1.628200e+00 1.224000e-01 1.492000e-01 1.828500e+00 -1.960000e-02 2.300000e-02 3.743000e-01 3.924000e-01
+ 4.160000e-01 1.654000e-01 6.349000e-01 2.783000e-01 6.370000e-01 4.172000e-01 6.487000e-01 2.577000e-01 6.379000e-01 4.257000e-01 6.367000e-01 2.739000e-01 3.315000e-01 2.005000e-01
+ 7.600000e-02 1.032000e+00 3.440000e-02 1.042800e+00 -1.570000e-02 8.486000e-01 4.830000e-02 1.026200e+00 9.170000e-02 7.007000e-01 -1.420000e-02 1.099300e+00 4.420000e-02 5.973000e-01
+ 6.870000e-02 6.446000e-01 4.410000e-01 5.699000e-01 4.881000e-01 6.571000e-01 1.674200e+00 6.730000e-02 1.860000e-02 1.976100e+00 2.750000e-02 -3.180000e-02 2.199000e-01 3.772000e-01
+ 1.838000e-01 9.222000e-01 1.668000e-01 8.231000e-01 1.093000e-01 6.645000e-01 1.597000e-01 8.303000e-01 1.604000e-01 5.899000e-01 1.905000e-01 7.935000e-01 9.970000e-02 5.155000e-01
+ 5.985000e-01 2.114000e-01 8.599000e-01 2.747000e-01 8.155000e-01 4.715000e-01 1.754300e+00 -2.590000e-02 -1.890000e-02 2.017000e+00 -6.550000e-02 7.960000e-02 4.398000e-01 2.384000e-01
+ 9.812000e-01 1.464000e-01 9.873000e-01 7.388000e-01 7.230000e-01 1.253800e+00 9.414000e-01 7.927000e-01 7.645000e-01 1.026800e+00 9.855000e-01 7.375000e-01 6.030000e-01 3.864000e-01
+ 2.771000e-01 4.934000e-01 4.557000e-01 7.448000e-01 4.533000e-01 9.261000e-01 3.633000e-01 8.519000e-01 3.589000e-01 1.055700e+00 4.432000e-01 7.585000e-01 2.485000e-01 4.471000e-01
+ 2.765000e-01 5.446000e-01 4.058000e-01 8.792000e-01 5.765000e-01 8.749000e-01 4.498000e-01 8.381000e-01 6.530000e-01 8.068000e-01 4.016000e-01 8.954000e-01 2.742000e-01 4.652000e-01
+ 8.580000e-01 2.206000e-01 3.943000e-01 1.329800e+00 3.000000e-01 7.321000e-01 4.228000e-01 1.296500e+00 3.049000e-01 6.911000e-01 3.720000e-01 1.352200e+00 2.668000e-01 4.835000e-01
+ 4.178000e-01 5.168000e-01 5.407000e-01 9.438000e-01 6.523000e-01 1.029500e+00 6.214000e-01 8.466000e-01 7.659000e-01 9.212000e-01 5.444000e-01 9.391000e-01 3.318000e-01 5.214000e-01
+ 5.398000e-01 3.048000e-01 8.779000e-01 4.404000e-01 1.011700e+00 4.854000e-01 8.146000e-01 5.131000e-01 9.718000e-01 5.539000e-01 8.771000e-01 4.340000e-01 4.961000e-01 2.664000e-01
+ 1.737000e-01 1.020000e-01 2.218000e-01 2.243000e-01 3.025000e-01 1.940000e-01 2.421000e-01 1.973000e-01 2.738000e-01 2.345000e-01 1.940000e-01 2.585000e-01 1.483000e-01 1.043000e-01
+ 2.552000e-01 6.242000e-01 5.095000e-01 6.932000e-01 7.394000e-01 5.597000e-01 1.650500e+00 1.011000e-01 -2.090000e-02 2.025200e+00 4.510000e-02 -5.310000e-02 3.175000e-01 3.858000e-01
+ 6.043000e-01 4.847000e-01 2.698000e-01 7.133000e-01 1.207000e-01 6.650000e-01 1.966000e-01 7.995000e-01 3.183000e-01 4.146000e-01 3.367000e-01 6.292000e-01 2.181000e-01 3.906000e-01
+ 7.783000e-01 4.700000e-03 1.048700e+00 2.047000e-01 1.169700e+00 2.627000e-01 1.024000e+00 2.338000e-01 1.229200e+00 2.135000e-01 1.067300e+00 1.827000e-01 5.997000e-01 1.258000e-01
+ 1.056900e+00 5.520000e-02 7.505000e-01 1.643000e-01 5.627000e-01 1.735000e-01 7.464000e-01 1.701000e-01 5.599000e-01 1.603000e-01 7.443000e-01 1.746000e-01 4.886000e-01 9.790000e-02
+ 1.082000e+00 2.940000e-02 6.796000e-01 1.778000e-01 5.755000e-01 1.053000e-01 7.075000e-01 1.409000e-01 5.595000e-01 1.105000e-01 8.247000e-01 -2.300000e-03 5.124000e-01 3.890000e-02
+ 5.710000e-02 1.050300e+00 -1.100000e-03 4.328000e-01 1.623000e-01 1.709000e-01 -3.400000e-02 4.719000e-01 -1.950000e-02 3.805000e-01 -9.330000e-02 5.434000e-01 3.350000e-02 2.892000e-01
+ 1.309000e-01 3.577000e-01 1.534000e-01 6.240000e-01 1.369000e-01 7.509000e-01 2.430000e-01 5.122000e-01 2.289000e-01 6.538000e-01 2.045000e-01 5.573000e-01 1.022000e-01 3.412000e-01
+ 3.363000e-01 4.877000e-01 6.284000e-01 5.175000e-01 6.639000e-01 6.105000e-01 1.634700e+00 1.160000e-01 -7.740000e-02 2.089800e+00 1.650000e-02 -2.060000e-02 3.415000e-01 3.319000e-01
+ 8.430000e-02 3.520000e-01 5.280000e-02 6.516000e-01 6.420000e-02 7.334000e-01 5.900000e-02 6.430000e-01 6.910000e-02 7.387000e-01 1.051000e-01 5.891000e-01 3.390000e-02 3.710000e-01
+ 2.419000e-01 2.554000e-01 3.898000e-01 3.872000e-01 4.288000e-01 4.575000e-01 3.504000e-01 4.362000e-01 4.903000e-01 3.988000e-01 3.969000e-01 3.784000e-01 2.171000e-01 2.331000e-01
+ 6.307000e-01 4.740000e-01 9.798000e-01 5.770000e-01 9.152000e-01 7.924000e-01 1.912900e+00 -2.140000e-01 2.007500e+00 -1.050000e-02 -5.000000e-04 1.730100e+00 5.307000e-01 3.768000e-01
+ 4.460000e-01 -1.960000e-02 7.009000e-01 -2.960000e-02 7.206000e-01 5.770000e-02 6.415000e-01 4.150000e-02 7.693000e-01 1.520000e-02 7.258000e-01 -5.950000e-02 3.754000e-01 1.790000e-02
+ 1.226000e-01 8.072000e-01 5.387000e-01 6.823000e-01 6.626000e-01 6.829000e-01 1.689300e+00 4.540000e-02 -1.990000e-02 2.019300e+00 -3.810000e-02 4.740000e-02 2.932000e-01 4.304000e-01
+ 7.825000e-01 9.250000e-02 1.127900e+00 2.562000e-01 1.325700e+00 2.437000e-01 1.183100e+00 1.954000e-01 1.290500e+00 3.130000e-01 1.240500e+00 1.253000e-01 6.916000e-01 1.009000e-01
+ 4.689000e-01 1.481000e-01 7.357000e-01 2.329000e-01 7.442000e-01 3.792000e-01 6.927000e-01 2.833000e-01 8.208000e-01 3.054000e-01 7.793000e-01 1.818000e-01 4.031000e-01 1.613000e-01
+ 6.073000e-01 1.505000e-01 8.918000e-01 3.078000e-01 1.123600e+00 2.238000e-01 8.878000e-01 3.141000e-01 1.023600e+00 3.635000e-01 8.919000e-01 3.122000e-01 4.959000e-01 2.008000e-01
+ 1.084500e+00 2.560000e-02 1.255700e+00 4.653000e-01 9.001000e-01 1.065200e+00 1.240200e+00 4.783000e-01 9.773000e-01 7.541000e-01 1.280700e+00 4.358000e-01 6.714000e-01 3.143000e-01
+ 1.122400e+00 -2.090000e-02 6.288000e-01 1.111500e+00 5.239000e-01 8.923000e-01 7.640000e-01 9.513000e-01 6.113000e-01 7.272000e-01 6.948000e-01 1.037500e+00 4.859000e-01 4.066000e-01
+ 8.826000e-01 2.210000e-01 3.047000e-01 6.431000e-01 3.247000e-01 4.102000e-01 4.222000e-01 5.054000e-01 3.085000e-01 4.134000e-01 3.644000e-01 5.701000e-01 2.931000e-01 2.968000e-01
+-7.180000e-02 6.558000e-01 2.116000e-01 6.708000e-01 3.328000e-01 6.744000e-01 3.570000e-02 1.688300e+00 5.100000e-02 1.946700e+00 -7.030000e-02 8.540000e-02 1.075000e-01 4.049000e-01
+ 3.613000e-01 7.365000e-01 2.189000e-01 5.576000e-01 1.782000e-01 4.514000e-01 2.020000e-01 5.775000e-01 1.855000e-01 4.275000e-01 2.536000e-01 5.144000e-01 1.511000e-01 3.725000e-01
+ 1.074900e+00 4.140000e-02 9.121000e-01 1.876000e-01 7.667000e-01 5.760000e-02 9.258000e-01 1.722000e-01 7.291000e-01 8.450000e-02 8.942000e-01 2.083000e-01 5.681000e-01 8.440000e-02
+ 2.760000e-02 1.086800e+00 -4.250000e-02 5.219000e-01 3.250000e-02 3.607000e-01 2.460000e-02 4.429000e-01 8.690000e-02 2.859000e-01 -1.100000e-03 4.726000e-01 3.560000e-02 3.120000e-01
+-4.950000e-02 5.970000e-02 5.990000e-02 1.809000e-01 1.120000e-02 3.660000e-01 1.340000e-02 -1.290000e-02 7.630000e-02 -9.200000e-02 5.320000e-02 -6.370000e-02 6.000000e-03 1.293000e-01
+-7.640000e-02 1.198800e+00 3.247000e-01 1.192000e+00 3.850000e-01 1.257300e+00 -8.280000e-02 1.832100e+00 4.500000e-03 1.991200e+00 -1.990000e-02 1.757700e+00 1.692000e-01 7.139000e-01
+ 3.735000e-01 7.355000e-01 1.927000e-01 5.480000e-01 2.143000e-01 3.824000e-01 2.458000e-01 4.837000e-01 1.067000e-01 4.974000e-01 1.687000e-01 5.756000e-01 1.769000e-01 3.251000e-01
+ 2.741000e-01 5.164000e-01 3.749000e-01 8.773000e-01 4.355000e-01 9.832000e-01 4.299000e-01 8.103000e-01 5.146000e-01 9.149000e-01 3.877000e-01 8.587000e-01 2.248000e-01 4.949000e-01
+-7.800000e-03 7.603000e-01 -4.110000e-02 1.110900e+00 -3.300000e-03 1.193900e+00 -7.110000e-02 1.819100e+00 6.250000e-02 1.926700e+00 4.510000e-02 -5.140000e-02 1.150000e-02 6.132000e-01
+ 1.184000e+00 -9.540000e-02 4.044000e-01 6.970000e-01 2.919000e-01 5.550000e-01 5.266000e-01 5.560000e-01 3.209000e-01 5.018000e-01 3.657000e-01 7.450000e-01 3.112000e-01 3.396000e-01
+ 4.732000e-01 3.046000e-01 6.683000e-01 4.455000e-01 8.095000e-01 4.224000e-01 1.736500e+00 1.300000e-03 3.190000e-02 1.964000e+00 -2.400000e-03 3.500000e-03 4.121000e-01 2.376000e-01
+ 1.384000e-01 6.302000e-01 1.225000e-01 1.096600e+00 2.138000e-01 1.162500e+00 1.260000e-01 1.090700e+00 2.173000e-01 1.177400e+00 2.158000e-01 9.860000e-01 1.137000e-01 5.832000e-01
+ 5.437000e-01 1.828000e-01 8.222000e-01 3.264000e-01 1.067400e+00 2.131000e-01 8.137000e-01 3.349000e-01 1.033500e+00 2.770000e-01 8.017000e-01 3.519000e-01 5.158000e-01 1.387000e-01
+ 8.353000e-01 1.856000e-01 1.130800e+00 2.115000e-01 1.142400e+00 3.409000e-01 1.648800e+00 9.690000e-02 1.939400e+00 7.250000e-02 -3.960000e-02 4.650000e-02 6.589000e-01 1.338000e-01
+ 5.665000e-01 4.000000e-01 8.040000e-01 4.914000e-01 9.725000e-01 4.351000e-01 1.770100e+00 -5.090000e-02 -6.530000e-02 2.077400e+00 8.140000e-02 -9.730000e-02 4.666000e-01 2.948000e-01
+ 1.780000e-02 6.441000e-01 1.934000e-01 8.153000e-01 2.080000e-01 9.453000e-01 2.370000e-01 7.618000e-01 2.103000e-01 9.556000e-01 8.110000e-02 9.460000e-01 8.990000e-02 4.970000e-01
+ 3.716000e-01 5.727000e-01 5.762000e-01 9.028000e-01 7.666000e-01 8.992000e-01 6.574000e-01 8.045000e-01 8.085000e-01 8.724000e-01 6.284000e-01 8.446000e-01 3.867000e-01 4.575000e-01
+ 9.650000e-02 8.800000e-02 5.990000e-02 2.461000e-01 1.445000e-01 1.877000e-01 1.420000e-01 1.516000e-01 1.662000e-01 1.680000e-01 1.162000e-01 1.799000e-01 6.860000e-02 1.019000e-01
+ 5.024000e-01 5.639000e-01 7.328000e-01 9.492000e-01 8.707000e-01 1.036400e+00 6.174000e-01 1.084500e+00 8.037000e-01 1.142800e+00 7.794000e-01 8.958000e-01 4.507000e-01 5.156000e-01
+ 5.465000e-01 4.334000e-01 9.130000e-01 6.068000e-01 8.931000e-01 8.647000e-01 8.490000e-01 6.866000e-01 9.978000e-01 7.642000e-01 8.058000e-01 7.353000e-01 4.696000e-01 4.187000e-01
+ 1.090000e-01 7.328000e-01 2.592000e-01 1.042400e+00 3.453000e-01 1.126100e+00 2.120000e-01 1.099000e+00 3.552000e-01 1.139400e+00 2.400000e-01 1.065500e+00 1.622000e-01 5.869000e-01
+ 3.045000e-01 4.043000e-01 4.679000e-01 6.469000e-01 5.784000e-01 6.798000e-01 4.442000e-01 6.740000e-01 5.287000e-01 7.581000e-01 4.444000e-01 6.719000e-01 2.540000e-01 3.910000e-01
+ 5.597000e-01 2.298000e-01 8.849000e-01 3.551000e-01 1.034300e+00 3.724000e-01 1.094000e+00 1.069000e-01 1.116900e+00 3.034000e-01 8.565000e-01 3.830000e-01 5.458000e-01 1.637000e-01
+ 1.135400e+00 -3.660000e-02 9.930000e-01 7.328000e-01 7.396000e-01 4.829000e-01 1.096200e+00 6.153000e-01 7.750000e-01 4.016000e-01 1.057200e+00 6.568000e-01 6.032000e-01 2.289000e-01
+ 2.453000e-01 4.808000e-01 4.855000e-01 6.343000e-01 6.267000e-01 6.316000e-01 5.035000e-01 6.100000e-01 6.372000e-01 6.430000e-01 4.382000e-01 6.838000e-01 2.897000e-01 3.544000e-01
+ 1.590000e-01 9.624000e-01 5.616000e-01 1.167600e+00 7.067000e-01 1.198000e+00 1.750800e+00 -2.140000e-02 1.230000e-02 1.983100e+00 1.710000e-02 1.712500e+00 3.037000e-01 6.852000e-01
+ 1.016200e+00 1.034000e-01 4.259000e-01 4.574000e-01 3.615000e-01 3.437000e-01 3.998000e-01 4.866000e-01 3.650000e-01 3.214000e-01 4.585000e-01 4.220000e-01 3.193000e-01 2.518000e-01
+-6.100000e-02 1.181100e+00 1.040000e-02 1.661100e+00 9.170000e-02 1.701800e+00 1.160000e-02 1.715900e+00 -5.730000e-02 2.068800e+00 -1.860000e-02 1.750300e+00 4.010000e-02 9.064000e-01
+ 1.116900e+00 -1.100000e-02 1.668300e+00 7.910000e-02 1.079600e+00 2.620000e-02 1.753600e+00 -3.070000e-02 9.317000e-01 1.614000e-01 1.719400e+00 1.680000e-02 7.741000e-01 1.160000e-02
+ 5.425000e-01 5.662000e-01 4.065000e-01 7.119000e-01 1.688000e-01 7.062000e-01 3.222000e-01 8.160000e-01 7.430000e-02 7.979000e-01 1.885000e-01 9.741000e-01 2.290000e-01 4.339000e-01
+ 1.092100e+00 1.910000e-02 6.402000e-01 6.883000e-01 4.099000e-01 5.550000e-01 5.433000e-01 8.057000e-01 4.109000e-01 5.229000e-01 5.550000e-01 7.881000e-01 4.032000e-01 3.093000e-01
+ 4.276000e-01 5.982000e-01 8.580000e-01 4.580000e-01 9.666000e-01 4.712000e-01 1.702400e+00 3.460000e-02 9.500000e-03 1.983900e+00 3.450000e-02 -4.050000e-02 4.654000e-01 3.163000e-01
+ 1.115200e+00 -1.240000e-02 8.336000e-01 9.390000e-02 7.419000e-01 -2.350000e-02 7.926000e-01 1.436000e-01 6.870000e-01 2.140000e-02 8.857000e-01 3.260000e-02 5.579000e-01 2.920000e-02
+ 6.695000e-01 2.592000e-01 1.030400e+00 1.922000e-01 1.123800e+00 2.317000e-01 1.701900e+00 3.900000e-02 1.941600e+00 6.780000e-02 -6.900000e-03 9.600000e-03 5.872000e-01 1.409000e-01
+ 1.106000e+00 1.000000e-04 5.493000e-01 1.184000e+00 5.373000e-01 6.650000e-01 4.143000e-01 1.344100e+00 4.731000e-01 6.990000e-01 5.029000e-01 1.236600e+00 4.045000e-01 4.258000e-01
+ 2.478000e-01 6.458000e-01 4.043000e-01 8.337000e-01 7.193000e-01 6.084000e-01 1.761800e+00 -2.940000e-02 3.350000e-02 1.959700e+00 -3.270000e-02 4.190000e-02 2.817000e-01 4.376000e-01
+ 7.105000e-01 3.832000e-01 9.548000e-01 4.648000e-01 1.065400e+00 4.734000e-01 1.780600e+00 -5.460000e-02 1.931100e+00 7.980000e-02 -8.090000e-02 1.827600e+00 5.788000e-01 2.513000e-01
+ 9.968000e-01 1.254000e-01 3.771000e-01 8.535000e-01 3.739000e-01 5.202000e-01 4.012000e-01 8.273000e-01 3.794000e-01 4.933000e-01 3.189000e-01 9.200000e-01 3.228000e-01 3.621000e-01
+ 8.350000e-02 5.562000e-01 -2.360000e-02 9.990000e-01 1.625000e-01 8.986000e-01 -2.440000e-02 1.758700e+00 9.700000e-02 1.884700e+00 -6.000000e-03 8.400000e-03 4.000000e-03 5.609000e-01
+ 7.489000e-01 2.026000e-01 1.055900e+00 4.588000e-01 1.076900e+00 6.637000e-01 9.010000e-01 6.377000e-01 1.106200e+00 6.628000e-01 9.712000e-01 5.537000e-01 5.625000e-01 3.189000e-01
+ 6.852000e-01 4.216000e-01 2.922000e-01 1.446000e+00 2.703000e-01 1.491500e+00 3.944000e-01 1.328700e+00 2.530000e-01 1.362000e+00 3.464000e-01 1.382300e+00 2.611000e-01 7.005000e-01
+ 1.096600e+00 1.160000e-02 5.389000e-01 6.236000e-01 4.291000e-01 4.441000e-01 4.939000e-01 6.776000e-01 5.015000e-01 3.356000e-01 5.481000e-01 6.099000e-01 3.832000e-01 2.869000e-01
+ 4.674000e-01 3.366000e-01 6.300000e-01 6.509000e-01 8.707000e-01 5.648000e-01 8.028000e-01 4.547000e-01 7.896000e-01 6.800000e-01 6.882000e-01 5.842000e-01 3.931000e-01 3.419000e-01
+ 3.740000e-02 3.977000e-01 1.968000e-01 5.588000e-01 3.630000e-01 5.027000e-01 5.700000e-03 1.723800e+00 5.880000e-02 -6.790000e-02 4.370000e-02 -5.240000e-02 9.980000e-02 3.309000e-01
+-8.640000e-02 7.606000e-01 1.386000e-01 8.285000e-01 1.761000e-01 9.310000e-01 1.890000e-02 1.707500e+00 8.900000e-02 1.899300e+00 -2.260000e-02 2.660000e-02 7.470000e-02 4.927000e-01
+ 9.280000e-02 4.511000e-01 2.259000e-01 6.111000e-01 2.024000e-01 7.561000e-01 2.166000e-01 6.206000e-01 2.474000e-01 7.191000e-01 3.004000e-01 5.213000e-01 1.155000e-01 3.700000e-01
+ 1.129200e+00 -2.850000e-02 1.839300e+00 -1.256000e-01 1.044800e+00 3.002000e-01 1.690200e+00 4.770000e-02 1.060200e+00 2.250000e-01 1.756400e+00 -2.550000e-02 7.967000e-01 7.160000e-02
+ 4.576000e-01 6.481000e-01 8.842000e-01 6.654000e-01 8.400000e-01 8.585000e-01 1.776800e+00 -5.000000e-02 -2.600000e-02 2.027400e+00 -3.770000e-02 1.776600e+00 4.504000e-01 4.539000e-01
+ 9.950000e-01 1.179000e-01 4.039000e-01 5.380000e-02 3.093000e-01 8.520000e-02 3.899000e-01 6.880000e-02 3.294000e-01 5.420000e-02 3.797000e-01 8.240000e-02 3.046000e-01 4.090000e-02
+ 3.339000e-01 6.875000e-01 4.558000e-01 1.153400e+00 6.172000e-01 1.192000e+00 5.502000e-01 1.038800e+00 6.202000e-01 1.225200e+00 5.811000e-01 1.007600e+00 3.185000e-01 6.004000e-01
+-4.820000e-02 6.359000e-01 1.800000e-02 8.810000e-01 2.713000e-01 7.287000e-01 -7.900000e-03 1.742900e+00 -7.300000e-02 2.082400e+00 1.420000e-02 -1.830000e-02 5.610000e-02 4.595000e-01
+ 2.457000e-01 8.729000e-01 5.142000e-01 1.241100e+00 6.712000e-01 1.218500e+00 1.593200e+00 1.650000e-01 -7.070000e-02 2.076500e+00 -4.660000e-02 1.782600e+00 3.449000e-01 6.335000e-01
+ 1.229000e-01 4.821000e-01 1.893000e-01 7.599000e-01 1.586000e-01 9.281000e-01 1.126000e-01 8.487000e-01 1.617000e-01 9.455000e-01 1.438000e-01 8.120000e-01 8.710000e-02 4.641000e-01
+ 1.070200e+00 4.060000e-02 9.576000e-01 7.759000e-01 7.389000e-01 6.740000e-01 9.694000e-01 7.624000e-01 6.454000e-01 7.210000e-01 8.924000e-01 8.504000e-01 5.947000e-01 3.001000e-01
+ 1.169300e+00 -7.410000e-02 4.090000e-01 1.405000e-01 3.912000e-01 5.770000e-02 3.398000e-01 2.218000e-01 3.632000e-01 8.360000e-02 3.703000e-01 1.829000e-01 3.115000e-01 8.760000e-02
+ 1.105400e+00 -1.500000e-03 8.807000e-01 8.586000e-01 7.397000e-01 7.915000e-01 8.299000e-01 9.165000e-01 6.437000e-01 8.200000e-01 8.560000e-01 8.872000e-01 5.826000e-01 3.420000e-01
+ 7.364000e-01 3.606000e-01 3.379000e-01 1.395600e+00 3.809000e-01 6.602000e-01 3.742000e-01 1.349900e+00 2.766000e-01 7.495000e-01 3.453000e-01 1.385800e+00 2.625000e-01 5.014000e-01
+ 2.111000e-01 4.445000e-01 3.171000e-01 7.100000e-01 2.735000e-01 9.087000e-01 2.507000e-01 7.881000e-01 3.714000e-01 8.123000e-01 3.026000e-01 7.286000e-01 1.915000e-01 3.995000e-01
+ 1.103900e+00 1.500000e-03 1.563900e+00 1.408000e-01 1.042800e+00 2.625000e-01 1.342200e+00 4.042000e-01 9.119000e-01 3.649000e-01 1.385600e+00 3.508000e-01 7.253000e-01 1.404000e-01
+ 4.195000e-01 8.830000e-02 7.209000e-01 9.850000e-02 9.006000e-01 3.060000e-02 1.774500e+00 -4.860000e-02 4.910000e-02 -5.700000e-02 -4.180000e-02 4.740000e-02 4.286000e-01 3.880000e-02
+ 5.653000e-01 2.091000e-01 8.336000e-01 2.673000e-01 9.022000e-01 3.250000e-01 1.754900e+00 -2.750000e-02 -4.610000e-02 2.045700e+00 9.900000e-03 -1.070000e-02 4.766000e-01 1.708000e-01
+ 1.073400e+00 3.650000e-02 1.302600e+00 4.334000e-01 9.583000e-01 1.296000e-01 1.387600e+00 3.254000e-01 9.933000e-01 4.790000e-02 1.318700e+00 4.104000e-01 7.018000e-01 7.740000e-02
+ 1.012300e+00 9.170000e-02 1.274500e+00 4.656000e-01 1.394300e+00 5.197000e-01 1.596500e+00 1.580000e-01 1.963400e+00 4.470000e-02 -3.710000e-02 1.775800e+00 7.757000e-01 2.109000e-01
+ 3.312000e-01 4.549000e-01 4.088000e-01 8.564000e-01 4.991000e-01 9.237000e-01 3.774000e-01 8.868000e-01 4.258000e-01 1.034300e+00 4.400000e-01 8.127000e-01 2.453000e-01 4.791000e-01
+ 2.755000e-01 5.100000e-02 5.709000e-01 7.640000e-02 6.809000e-01 8.630000e-02 1.552000e+00 2.105000e-01 1.029000e-01 -1.244000e-01 4.730000e-02 -5.740000e-02 3.188000e-01 4.080000e-02
+ 1.982000e-01 4.741000e-01 5.539000e-01 4.219000e-01 5.941000e-01 5.179000e-01 1.645400e+00 9.840000e-02 -2.010000e-02 2.023500e+00 2.800000e-03 -4.000000e-03 2.789000e-01 2.966000e-01
+ 5.457000e-01 5.523000e-01 2.855000e-01 1.444900e+00 2.507000e-01 9.536000e-01 2.293000e-01 1.511200e+00 1.937000e-01 9.833000e-01 2.684000e-01 1.465500e+00 2.168000e-01 6.102000e-01
+-1.760000e-02 7.540000e-01 2.606000e-01 7.783000e-01 3.203000e-01 8.502000e-01 3.800000e-02 1.688500e+00 -8.700000e-03 2.010800e+00 1.600000e-03 -5.200000e-03 1.225000e-01 4.917000e-01
+ 4.159000e-01 3.145000e-01 5.517000e-01 6.108000e-01 6.129000e-01 7.097000e-01 5.831000e-01 5.751000e-01 5.852000e-01 7.686000e-01 6.010000e-01 5.550000e-01 3.130000e-01 3.593000e-01
+ 9.423000e-01 8.490000e-02 1.512500e+00 9.520000e-02 1.424700e+00 4.486000e-01 1.395900e+00 2.259000e-01 1.504800e+00 3.934000e-01 1.365400e+00 2.641000e-01 7.831000e-01 1.587000e-01
+ 4.414000e-01 3.166000e-01 7.875000e-01 2.755000e-01 8.969000e-01 2.846000e-01 1.744600e+00 -1.050000e-02 2.500000e-02 1.969800e+00 6.880000e-02 -8.310000e-02 4.572000e-01 1.650000e-01
+ 4.897000e-01 6.166000e-01 7.778000e-01 7.313000e-01 9.998000e-01 6.125000e-01 1.702800e+00 3.800000e-02 1.101000e-01 1.871500e+00 6.670000e-02 1.654300e+00 4.621000e-01 4.153000e-01
+ 3.592000e-01 1.203000e-01 5.896000e-01 1.628000e-01 7.070000e-01 1.390000e-01 6.235000e-01 1.224000e-01 6.721000e-01 1.972000e-01 6.086000e-01 1.439000e-01 3.612000e-01 6.870000e-02
+ 1.177800e+00 -8.270000e-02 1.425000e+00 2.890000e-02 1.075300e+00 -1.025000e-01 1.426900e+00 3.190000e-02 9.603000e-01 8.000000e-04 1.419000e+00 4.170000e-02 7.187000e-01 1.360000e-02
+ 2.602000e-01 7.164000e-01 7.322000e-01 5.280000e-01 7.810000e-01 6.129000e-01 1.754400e+00 -2.570000e-02 -4.170000e-02 2.048600e+00 -7.420000e-02 8.850000e-02 3.622000e-01 3.902000e-01
+ 9.853000e-01 1.293000e-01 4.345000e-01 2.152000e-01 3.437000e-01 1.954000e-01 4.099000e-01 2.471000e-01 3.284000e-01 2.032000e-01 4.200000e-01 2.330000e-01 3.041000e-01 1.577000e-01
+ 9.117000e-01 1.948000e-01 1.158700e+00 4.074000e-01 1.335200e+00 3.364000e-01 1.714900e+00 1.760000e-02 2.016600e+00 -2.550000e-02 -1.041000e-01 1.856400e+00 7.200000e-01 1.807000e-01
+ 1.140600e+00 -3.580000e-02 6.300000e-01 1.801000e-01 3.542000e-01 3.308000e-01 5.145000e-01 3.221000e-01 4.868000e-01 1.562000e-01 5.023000e-01 3.346000e-01 4.021000e-01 1.433000e-01
+ 6.267000e-01 4.142000e-01 9.552000e-01 6.795000e-01 1.028300e+00 8.437000e-01 9.689000e-01 6.655000e-01 1.223900e+00 6.474000e-01 9.202000e-01 7.226000e-01 5.495000e-01 3.955000e-01
+ 2.915000e-01 2.898000e-01 3.733000e-01 5.469000e-01 3.424000e-01 7.258000e-01 3.200000e-01 6.140000e-01 4.248000e-01 6.449000e-01 2.811000e-01 6.647000e-01 1.979000e-01 3.389000e-01
+ 7.741000e-01 1.946000e-01 1.068400e+00 4.755000e-01 1.394300e+00 3.300000e-01 1.170700e+00 3.506000e-01 1.343000e+00 4.205000e-01 1.218300e+00 2.957000e-01 6.731000e-01 2.077000e-01
+-3.680000e-02 4.017000e-01 6.590000e-02 6.122000e-01 2.108000e-01 5.843000e-01 -7.320000e-02 1.821100e+00 -7.040000e-02 8.380000e-02 -1.620000e-02 1.770000e-02 6.800000e-02 3.089000e-01
+ 3.511000e-01 8.330000e-02 6.243000e-01 4.320000e-02 6.950000e-01 7.220000e-02 6.273000e-01 3.880000e-02 8.073000e-01 -5.250000e-02 6.866000e-01 -2.930000e-02 3.668000e-01 1.780000e-02
+-2.200000e-03 4.328000e-01 9.620000e-02 5.566000e-01 1.032000e-01 6.397000e-01 7.780000e-02 5.799000e-01 1.418000e-01 6.060000e-01 1.540000e-02 6.526000e-01 4.010000e-02 3.400000e-01
+ 1.157000e-01 3.932000e-01 2.233000e-01 5.684000e-01 3.136000e-01 5.740000e-01 2.221000e-01 5.673000e-01 1.697000e-01 7.595000e-01 1.936000e-01 6.002000e-01 1.201000e-01 3.379000e-01
+ 3.699000e-01 7.311000e-01 7.281000e-01 9.934000e-01 6.734000e-01 1.311100e+00 1.734800e+00 -5.500000e-03 1.186000e-01 1.858800e+00 -1.470000e-02 1.746300e+00 3.813000e-01 6.166000e-01
+ 3.891000e-01 2.215000e-01 6.282000e-01 3.252000e-01 7.111000e-01 3.700000e-01 6.102000e-01 3.434000e-01 7.287000e-01 3.707000e-01 5.959000e-01 3.620000e-01 3.536000e-01 1.964000e-01
+ 1.167600e+00 -7.010000e-02 3.770000e-01 1.359000e+00 2.343000e-01 1.332700e+00 4.709000e-01 1.250900e+00 2.915000e-01 1.174700e+00 3.580000e-01 1.382700e+00 3.080000e-01 6.191000e-01
+ 4.259000e-01 4.327000e-01 6.630000e-01 6.835000e-01 6.497000e-01 9.032000e-01 6.208000e-01 7.378000e-01 6.525000e-01 9.307000e-01 6.688000e-01 6.760000e-01 3.576000e-01 4.249000e-01
+ 4.507000e-01 5.850000e-01 7.612000e-01 8.484000e-01 8.878000e-01 9.469000e-01 7.216000e-01 8.978000e-01 7.942000e-01 1.079400e+00 7.377000e-01 8.773000e-01 4.067000e-01 5.303000e-01
+ 1.700000e-01 9.436000e-01 1.486000e-01 1.091900e+00 5.420000e-02 8.650000e-01 2.837000e-01 9.306000e-01 1.378000e-01 7.436000e-01 1.412000e-01 1.099300e+00 1.027000e-01 5.879000e-01
+ 3.313000e-01 2.213000e-01 5.529000e-01 3.276000e-01 7.630000e-01 2.247000e-01 1.775100e+00 -4.960000e-02 -1.123000e-01 2.130400e+00 3.450000e-02 -4.120000e-02 3.109000e-01 1.995000e-01
+-3.970000e-02 8.653000e-01 2.490000e-02 1.106400e+00 1.539000e-01 1.077600e+00 2.170000e-02 1.703100e+00 -3.390000e-02 2.043400e+00 4.890000e-02 -5.940000e-02 9.900000e-03 6.578000e-01
+ 4.160000e-02 2.492000e-01 3.630000e-02 5.778000e-01 2.428000e-01 4.730000e-01 -2.500000e-03 1.733400e+00 -5.520000e-02 6.750000e-02 4.880000e-02 -5.870000e-02 4.570000e-02 2.934000e-01
+ 1.156100e+00 -6.180000e-02 4.096000e-01 1.040000e+00 4.179000e-01 5.648000e-01 4.842000e-01 9.480000e-01 3.375000e-01 6.273000e-01 3.995000e-01 1.051100e+00 3.377000e-01 3.912000e-01
+ 1.151400e+00 -5.380000e-02 5.146000e-01 1.212200e+00 4.147000e-01 8.332000e-01 5.307000e-01 1.194600e+00 3.870000e-01 8.223000e-01 5.730000e-01 1.144800e+00 3.725000e-01 4.698000e-01
+ 8.854000e-01 2.118000e-01 2.810000e-01 5.846000e-01 3.241000e-01 3.542000e-01 3.327000e-01 5.222000e-01 3.007000e-01 3.679000e-01 4.131000e-01 4.262000e-01 2.879000e-01 2.674000e-01
+ 3.638000e-01 3.804000e-01 7.445000e-01 2.988000e-01 8.495000e-01 3.202000e-01 1.906100e+00 -2.026000e-01 0.000000e+00 2.001200e+00 -8.700000e-03 9.600000e-03 3.821000e-01 2.389000e-01
+ 2.077000e-01 4.487000e-01 2.482000e-01 7.953000e-01 3.955000e-01 7.727000e-01 3.535000e-01 6.678000e-01 4.896000e-01 6.782000e-01 3.969000e-01 6.183000e-01 2.022000e-01 3.902000e-01
+ 2.329000e-01 6.285000e-01 5.094000e-01 8.113000e-01 6.033000e-01 8.956000e-01 5.496000e-01 7.599000e-01 6.561000e-01 8.529000e-01 5.272000e-01 7.862000e-01 3.007000e-01 4.596000e-01
+ 2.791000e-01 3.898000e-01 3.058000e-01 7.622000e-01 5.019000e-01 6.861000e-01 4.306000e-01 6.145000e-01 4.562000e-01 7.582000e-01 4.978000e-01 5.388000e-01 2.469000e-01 3.571000e-01
+ 7.123000e-01 4.088000e-01 4.206000e-01 5.596000e-01 2.809000e-01 4.953000e-01 3.921000e-01 5.953000e-01 3.269000e-01 4.263000e-01 3.688000e-01 6.182000e-01 2.817000e-01 3.309000e-01
+ 4.560000e-01 6.668000e-01 9.033000e-01 7.476000e-01 1.013600e+00 7.590000e-01 1.754300e+00 -2.050000e-02 1.967200e+00 3.600000e-02 -5.360000e-02 1.796700e+00 5.007000e-01 4.429000e-01
+ 8.890000e-02 1.325000e-01 4.074000e-01 1.244000e-01 5.965000e-01 4.450000e-02 1.626600e+00 1.235000e-01 -4.290000e-02 5.310000e-02 4.770000e-02 -5.700000e-02 2.280000e-01 6.230000e-02
+ 1.146600e+00 -4.920000e-02 7.559000e-01 2.360000e-02 5.875000e-01 5.060000e-02 7.465000e-01 3.910000e-02 6.143000e-01 2.300000e-03 8.245000e-01 -5.390000e-02 5.154000e-01 7.700000e-03
+ 8.170000e-02 2.001000e-01 1.828000e-01 2.480000e-01 5.410000e-02 4.648000e-01 1.310000e-01 3.071000e-01 6.150000e-02 4.636000e-01 1.292000e-01 3.150000e-01 6.720000e-02 1.890000e-01
+ 3.180000e-02 6.007000e-01 9.280000e-02 8.930000e-01 6.660000e-02 1.058400e+00 1.871000e-01 7.811000e-01 1.862000e-01 9.383000e-01 1.827000e-01 7.888000e-01 4.380000e-02 5.275000e-01
+ 4.072000e-01 3.659000e-01 5.085000e-01 7.296000e-01 5.936000e-01 8.117000e-01 6.028000e-01 6.189000e-01 5.598000e-01 8.765000e-01 6.076000e-01 6.148000e-01 3.187000e-01 3.925000e-01
+ 2.243000e-01 6.124000e-01 4.186000e-01 7.552000e-01 5.787000e-01 7.057000e-01 1.668500e+00 7.700000e-02 -8.110000e-02 2.092600e+00 -5.970000e-02 7.010000e-02 2.660000e-01 4.185000e-01
+ 1.119900e+00 -1.730000e-02 7.295000e-01 1.010000e+00 6.374000e-01 6.944000e-01 7.769000e-01 9.545000e-01 7.149000e-01 5.439000e-01 7.168000e-01 1.024200e+00 5.167000e-01 3.524000e-01
+ 4.520000e-01 4.722000e-01 6.902000e-01 7.582000e-01 9.625000e-01 6.599000e-01 7.994000e-01 6.382000e-01 8.570000e-01 8.108000e-01 7.531000e-01 6.950000e-01 4.221000e-01 4.123000e-01
+ 1.081400e+00 2.580000e-02 7.381000e-01 8.548000e-01 5.455000e-01 4.843000e-01 8.089000e-01 7.721000e-01 6.447000e-01 3.352000e-01 7.806000e-01 8.049000e-01 5.102000e-01 2.349000e-01
+-3.530000e-02 9.139000e-01 3.109000e-01 8.767000e-01 3.875000e-01 9.291000e-01 1.760000e-02 1.708500e+00 5.190000e-02 1.937300e+00 -2.270000e-02 2.460000e-02 1.649000e-01 5.366000e-01
+ 9.807000e-01 1.179000e-01 4.309000e-01 2.824000e-01 3.117000e-01 2.823000e-01 4.382000e-01 2.777000e-01 3.246000e-01 2.548000e-01 4.180000e-01 3.002000e-01 3.007000e-01 1.943000e-01
+ 3.250000e-01 2.790000e-01 7.360000e-01 1.607000e-01 7.521000e-01 2.878000e-01 1.784500e+00 -6.330000e-02 -7.240000e-02 2.082300e+00 3.470000e-02 -4.150000e-02 3.443000e-01 1.906000e-01
+ 5.688000e-01 9.040000e-02 8.446000e-01 1.970000e-01 9.281000e-01 2.649000e-01 8.492000e-01 1.929000e-01 8.796000e-01 3.376000e-01 7.574000e-01 3.030000e-01 4.745000e-01 1.294000e-01
+ 3.120000e-01 4.004000e-01 6.671000e-01 3.566000e-01 5.774000e-01 6.072000e-01 1.690600e+00 5.000000e-02 1.290000e-02 1.987400e+00 -1.380000e-02 1.650000e-02 3.382000e-01 2.667000e-01
+ 1.663000e-01 6.186000e-01 1.562000e-01 1.084300e+00 1.519000e-01 1.267800e+00 1.862000e-01 1.049100e+00 3.050000e-01 1.105300e+00 1.669000e-01 1.073600e+00 1.202000e-01 5.912000e-01
+ 3.376000e-01 5.206000e-01 4.022000e-01 9.626000e-01 6.526000e-01 8.635000e-01 5.593000e-01 7.763000e-01 5.713000e-01 9.856000e-01 6.018000e-01 7.242000e-01 3.129000e-01 4.596000e-01
+ 4.579000e-01 6.553000e-01 2.936000e-01 8.654000e-01 2.920000e-01 5.680000e-01 2.777000e-01 8.882000e-01 2.360000e-01 6.118000e-01 2.499000e-01 9.187000e-01 2.159000e-01 4.540000e-01
+ 3.084000e-01 5.384000e-01 6.887000e-01 4.607000e-01 7.591000e-01 5.193000e-01 1.674400e+00 6.190000e-02 -2.330000e-02 2.028300e+00 -1.730000e-02 2.100000e-02 3.601000e-01 3.235000e-01
+ 2.808000e-01 2.563000e-01 4.488000e-01 3.873000e-01 5.796000e-01 3.615000e-01 4.806000e-01 3.485000e-01 5.999000e-01 3.518000e-01 4.774000e-01 3.545000e-01 2.802000e-01 1.986000e-01
+ 8.380000e-02 1.481000e-01 1.604000e-01 1.943000e-01 2.514000e-01 1.399000e-01 2.079000e-01 1.384000e-01 2.309000e-01 1.706000e-01 1.796000e-01 1.728000e-01 1.045000e-01 9.790000e-02
+ 5.270000e-02 4.620000e-01 3.844000e-01 4.432000e-01 5.097000e-01 4.337000e-01 -2.350000e-02 1.761600e+00 -9.900000e-03 1.100000e-02 3.140000e-02 -3.780000e-02 1.804000e-01 3.003000e-01
+ 4.784000e-01 -3.000000e-03 7.456000e-01 2.300000e-03 7.309000e-01 1.364000e-01 6.966000e-01 5.930000e-02 7.600000e-01 1.187000e-01 7.205000e-01 3.540000e-02 4.033000e-01 3.320000e-02
+ 3.585000e-01 6.260000e-01 7.743000e-01 5.065000e-01 7.985000e-01 6.149000e-01 1.737500e+00 -1.090000e-02 4.900000e-02 1.945400e+00 -8.970000e-02 1.065000e-01 4.380000e-01 3.180000e-01
+ 5.082000e-01 5.910000e-01 7.890000e-01 7.576000e-01 8.673000e-01 8.025000e-01 1.677600e+00 6.540000e-02 -5.020000e-02 2.058300e+00 8.400000e-02 1.632800e+00 4.487000e-01 4.481000e-01
+ 3.315000e-01 5.409000e-01 6.587000e-01 5.250000e-01 6.879000e-01 6.287000e-01 1.730300e+00 0.000000e+00 8.930000e-02 1.898600e+00 4.910000e-02 -5.990000e-02 3.776000e-01 3.192000e-01
+ 7.500000e-03 7.326000e-01 2.217000e-01 8.010000e-01 2.055000e-01 9.643000e-01 6.180000e-02 1.656800e+00 -5.870000e-02 2.070400e+00 -2.560000e-02 3.100000e-02 4.460000e-02 5.755000e-01
+-4.240000e-02 7.591000e-01 3.428000e-01 6.647000e-01 3.545000e-01 7.933000e-01 5.000000e-02 1.675500e+00 -9.690000e-02 2.112200e+00 -1.800000e-03 2.400000e-03 1.535000e-01 4.441000e-01
+ 3.393000e-01 5.055000e-01 6.030000e-01 7.080000e-01 7.056000e-01 7.814000e-01 6.102000e-01 7.022000e-01 7.245000e-01 7.864000e-01 5.652000e-01 7.547000e-01 3.327000e-01 4.272000e-01
+ 6.824000e-01 4.344000e-01 1.050700e+00 6.378000e-01 1.118400e+00 7.004000e-01 1.775100e+00 -5.320000e-02 1.875000e+00 1.442000e-01 3.940000e-02 1.686500e+00 6.244000e-01 3.308000e-01
+ 3.648000e-01 5.643000e-01 7.458000e-01 4.867000e-01 8.342000e-01 5.187000e-01 1.737500e+00 -6.500000e-03 -9.810000e-02 2.117800e+00 -5.920000e-02 7.120000e-02 3.983000e-01 3.318000e-01
+ 1.129600e+00 -3.020000e-02 6.534000e-01 1.097600e+00 6.021000e-01 4.781000e-01 7.221000e-01 1.015500e+00 6.298000e-01 4.144000e-01 7.729000e-01 9.528000e-01 5.045000e-01 2.708000e-01
+ 4.052000e-01 3.131000e-01 6.783000e-01 4.436000e-01 7.699000e-01 5.071000e-01 6.499000e-01 4.775000e-01 8.500000e-01 4.314000e-01 7.182000e-01 3.959000e-01 4.053000e-01 2.391000e-01
+ 1.131800e+00 -3.150000e-02 6.668000e-01 5.677000e-01 5.198000e-01 3.855000e-01 6.295000e-01 6.142000e-01 4.733000e-01 4.171000e-01 7.019000e-01 5.303000e-01 4.325000e-01 2.588000e-01
+-1.350000e-02 9.692000e-01 6.860000e-02 1.201600e+00 2.007000e-01 1.184700e+00 -1.011000e-01 1.850400e+00 2.550000e-02 1.973700e+00 6.100000e-02 -7.320000e-02 5.340000e-02 6.952000e-01
+ 1.009400e+00 1.134000e-01 4.213000e-01 8.066000e-01 3.547000e-01 5.525000e-01 4.474000e-01 7.713000e-01 4.188000e-01 4.510000e-01 5.455000e-01 6.543000e-01 3.627000e-01 3.198000e-01
+ 4.146000e-01 4.600000e-02 7.304000e-01 4.780000e-02 8.895000e-01 -9.000000e-04 1.766900e+00 -4.180000e-02 1.940000e-02 -2.560000e-02 1.600000e-03 -2.100000e-03 4.215000e-01 2.020000e-02
+ 4.200000e-03 7.406000e-01 4.619000e-01 5.687000e-01 5.006000e-01 6.651000e-01 1.647100e+00 9.630000e-02 -8.380000e-02 2.093600e+00 1.680000e-02 -2.180000e-02 2.127000e-01 3.993000e-01
+ 8.960000e-01 2.122000e-01 1.236100e+00 2.560000e-01 1.376200e+00 2.300000e-01 1.731100e+00 5.000000e-04 1.994500e+00 4.600000e-03 -2.330000e-02 1.759100e+00 7.241000e-01 1.468000e-01
+ 1.125000e+00 -2.640000e-02 1.185100e+00 5.672000e-01 9.300000e-01 2.285000e-01 1.218800e+00 5.273000e-01 8.381000e-01 2.953000e-01 1.288500e+00 4.426000e-01 7.039000e-01 1.022000e-01
+ 9.570000e-02 2.974000e-01 4.062000e-01 1.635000e-01 4.607000e-01 1.812000e-01 3.996000e-01 1.672000e-01 4.821000e-01 1.683000e-01 3.296000e-01 2.546000e-01 2.160000e-01 1.141000e-01
+ 1.066400e+00 5.060000e-02 4.352000e-01 1.309700e+00 3.315000e-01 1.332100e+00 4.690000e-01 1.277800e+00 4.950000e-01 1.028800e+00 4.517000e-01 1.293800e+00 3.870000e-01 5.589000e-01
+ 9.807000e-01 1.493000e-01 1.107100e+00 6.114000e-01 7.685000e-01 2.679000e-01 9.559000e-01 7.915000e-01 7.832000e-01 2.189000e-01 1.040300e+00 6.888000e-01 6.295000e-01 1.212000e-01
+ 4.976000e-01 3.970000e-01 6.387000e-01 7.825000e-01 8.251000e-01 7.792000e-01 7.457000e-01 6.585000e-01 7.633000e-01 8.793000e-01 7.247000e-01 6.854000e-01 3.927000e-01 4.256000e-01
+ 7.080000e-02 8.437000e-01 1.565000e-01 1.267600e+00 2.215000e-01 1.388900e+00 2.746000e-01 1.129200e+00 2.840000e-01 1.339000e+00 1.668000e-01 1.250800e+00 1.165000e-01 7.000000e-01
+ 2.704000e-01 7.288000e-01 6.289000e-01 6.757000e-01 7.274000e-01 7.037000e-01 1.694200e+00 4.620000e-02 5.750000e-02 1.930000e+00 -1.150000e-02 1.200000e-02 3.791000e-01 3.871000e-01
+ 3.198000e-01 7.852000e-01 6.013000e-01 1.135000e+00 7.234000e-01 1.198300e+00 1.628900e+00 1.248000e-01 -4.420000e-02 2.053300e+00 8.170000e-02 1.635800e+00 3.488000e-01 6.410000e-01
+ 8.978000e-01 1.226000e-01 1.111100e+00 2.376000e-01 1.333900e+00 1.199000e-01 1.906700e+00 -2.029000e-01 1.955700e+00 4.500000e-02 1.680000e-02 -1.970000e-02 7.193000e-01 6.620000e-02
+ 3.376000e-01 4.681000e-01 7.722000e-01 3.205000e-01 7.227000e-01 5.211000e-01 1.563600e+00 1.941000e-01 5.300000e-03 1.993600e+00 -4.300000e-02 5.260000e-02 3.808000e-01 2.757000e-01
+-1.780000e-02 9.781000e-01 2.480000e-01 1.004500e+00 2.617000e-01 1.130300e+00 -5.760000e-02 1.800700e+00 3.090000e-02 1.965300e+00 -6.770000e-02 7.990000e-02 9.110000e-02 6.573000e-01
+ 3.065000e-01 4.802000e-01 5.440000e-01 6.745000e-01 5.246000e-01 8.795000e-01 4.652000e-01 7.694000e-01 5.639000e-01 8.570000e-01 4.735000e-01 7.587000e-01 2.864000e-01 4.227000e-01
+ 7.427000e-01 7.390000e-02 1.189300e+00 8.300000e-02 1.414300e+00 2.050000e-02 1.250600e+00 6.000000e-03 1.441500e+00 1.240000e-02 1.193500e+00 7.420000e-02 7.161000e-01 1.110000e-02
+ 4.666000e-01 6.226000e-01 8.800000e-01 7.976000e-01 9.467000e-01 9.688000e-01 6.725000e-01 1.039400e+00 8.872000e-01 1.067700e+00 7.361000e-01 9.614000e-01 4.290000e-01 5.515000e-01
+ 3.063000e-01 6.264000e-01 7.307000e-01 4.934000e-01 8.177000e-01 5.290000e-01 1.775700e+00 -4.860000e-02 -1.500000e-02 2.013600e+00 -5.090000e-02 6.170000e-02 4.033000e-01 3.219000e-01
+-2.300000e-03 9.057000e-01 2.291000e-01 9.915000e-01 4.589000e-01 8.647000e-01 5.000000e-04 1.731800e+00 4.500000e-03 1.990400e+00 -5.000000e-03 5.300000e-03 1.490000e-01 5.679000e-01
+ 7.854000e-01 3.098000e-01 2.999000e-01 6.876000e-01 2.856000e-01 4.767000e-01 3.644000e-01 6.130000e-01 2.626000e-01 4.918000e-01 3.000000e-01 6.889000e-01 2.698000e-01 3.384000e-01
+ 4.141000e-01 2.904000e-01 6.394000e-01 3.938000e-01 8.197000e-01 3.237000e-01 1.624800e+00 1.227000e-01 -1.530000e-02 2.018500e+00 -3.490000e-02 4.160000e-02 4.051000e-01 1.915000e-01
+ 6.392000e-01 4.688000e-01 9.039000e-01 8.416000e-01 1.061400e+00 8.376000e-01 1.598700e+00 1.593000e-01 1.972200e+00 3.310000e-02 -9.980000e-02 1.852400e+00 5.675000e-01 4.143000e-01
+ 6.436000e-01 2.330000e-02 1.019000e+00 -4.770000e-02 9.992000e-01 1.174000e-01 1.605600e+00 1.474000e-01 2.067800e+00 -8.150000e-02 5.890000e-02 -6.990000e-02 5.617000e-01 1.030000e-02
+ 1.056800e+00 5.610000e-02 9.277000e-01 3.840000e-01 7.862000e-01 1.421000e-01 9.047000e-01 4.088000e-01 7.154000e-01 1.977000e-01 9.725000e-01 3.286000e-01 5.982000e-01 1.060000e-01
+ 2.863000e-01 2.397000e-01 3.017000e-01 5.512000e-01 4.118000e-01 5.414000e-01 4.823000e-01 3.335000e-01 2.984000e-01 6.918000e-01 4.641000e-01 3.558000e-01 2.224000e-01 2.593000e-01
+ 4.731000e-01 8.560000e-02 7.429000e-01 1.325000e-01 9.045000e-01 8.170000e-02 8.110000e-01 4.620000e-02 8.391000e-01 1.784000e-01 7.406000e-01 1.352000e-01 4.483000e-01 5.340000e-02
+ 1.335000e-01 9.647000e-01 4.820000e-02 1.051100e+00 1.408000e-01 6.785000e-01 3.630000e-02 1.068100e+00 7.970000e-02 7.308000e-01 1.641000e-01 9.179000e-01 4.960000e-02 6.001000e-01
+ 4.580000e-02 9.044000e-01 3.932000e-01 8.614000e-01 5.322000e-01 8.431000e-01 1.675300e+00 6.230000e-02 3.500000e-02 1.957700e+00 -3.200000e-03 3.000000e-03 2.104000e-01 5.312000e-01
+ 1.039000e+00 7.570000e-02 7.307000e-01 1.009800e+00 5.982000e-01 1.377000e+00 7.363000e-01 1.004300e+00 6.714000e-01 1.031500e+00 8.457000e-01 8.775000e-01 5.253000e-01 4.539000e-01
+ 5.764000e-01 3.032000e-01 9.577000e-01 4.120000e-01 9.673000e-01 6.153000e-01 9.352000e-01 4.392000e-01 9.752000e-01 6.314000e-01 7.572000e-01 6.480000e-01 5.075000e-01 2.932000e-01
+ 1.140700e+00 -4.550000e-02 9.417000e-01 7.776000e-01 7.075000e-01 5.760000e-01 8.199000e-01 9.228000e-01 6.980000e-01 5.391000e-01 9.480000e-01 7.681000e-01 5.668000e-01 2.880000e-01
+-2.900000e-02 6.733000e-01 3.202000e-01 6.315000e-01 4.043000e-01 6.685000e-01 -7.370000e-02 1.813700e+00 1.229000e-01 1.851200e+00 4.170000e-02 -5.200000e-02 1.569000e-01 3.977000e-01
+ 3.265000e-01 4.894000e-01 6.227000e-01 5.130000e-01 7.186000e-01 5.417000e-01 1.765500e+00 -3.930000e-02 -4.840000e-02 2.053500e+00 9.800000e-03 -1.190000e-02 3.285000e-01 3.432000e-01
+ 1.117100e+00 -8.600000e-03 7.302000e-01 3.616000e-01 6.130000e-01 2.139000e-01 6.646000e-01 4.410000e-01 6.113000e-01 1.987000e-01 7.062000e-01 3.899000e-01 4.921000e-01 1.567000e-01
+ 1.141900e+00 -4.700000e-02 6.574000e-01 7.773000e-01 5.479000e-01 4.382000e-01 7.244000e-01 6.954000e-01 5.710000e-01 3.823000e-01 7.053000e-01 7.203000e-01 4.940000e-01 2.317000e-01
+ 4.965000e-01 1.738000e-01 7.885000e-01 2.035000e-01 9.662000e-01 1.357000e-01 1.835300e+00 -1.154000e-01 -1.560000e-02 2.021700e+00 -1.450000e-02 1.650000e-02 4.759000e-01 9.810000e-02
+ 4.544000e-01 3.766000e-01 8.270000e-01 4.520000e-01 7.125000e-01 7.868000e-01 6.932000e-01 6.157000e-01 7.669000e-01 7.423000e-01 5.673000e-01 7.614000e-01 3.870000e-01 3.691000e-01
+ 4.055000e-01 3.654000e-01 7.823000e-01 3.933000e-01 7.759000e-01 5.908000e-01 7.125000e-01 4.798000e-01 9.026000e-01 4.538000e-01 7.434000e-01 4.432000e-01 4.013000e-01 2.881000e-01
+ 5.728000e-01 2.989000e-01 8.306000e-01 3.644000e-01 9.304000e-01 3.902000e-01 1.696000e+00 4.100000e-02 -8.430000e-02 2.101000e+00 -7.130000e-02 8.580000e-02 5.000000e-01 2.026000e-01
+ 3.376000e-01 7.775000e-01 3.464000e-01 1.362100e+00 2.232000e-01 9.473000e-01 2.115000e-01 1.521300e+00 2.551000e-01 8.705000e-01 2.435000e-01 1.486300e+00 1.729000e-01 6.418000e-01
+ 1.090600e+00 1.860000e-02 6.090000e-01 1.122400e+00 4.901000e-01 5.531000e-01 5.431000e-01 1.201300e+00 5.042000e-01 5.040000e-01 5.107000e-01 1.237100e+00 4.253000e-01 3.304000e-01
+ 1.114300e+00 -1.080000e-02 5.238000e-01 4.260000e-01 3.534000e-01 4.049000e-01 5.256000e-01 4.235000e-01 4.289000e-01 2.981000e-01 5.392000e-01 4.056000e-01 3.375000e-01 2.647000e-01
+ 4.647000e-01 9.610000e-02 8.450000e-01 1.090000e-02 9.376000e-01 3.980000e-02 7.987000e-01 6.400000e-02 8.026000e-01 2.185000e-01 7.818000e-01 8.880000e-02 4.603000e-01 3.840000e-02
+ 1.721000e-01 2.183000e-01 2.819000e-01 3.280000e-01 3.557000e-01 3.364000e-01 2.338000e-01 3.847000e-01 3.266000e-01 3.775000e-01 2.295000e-01 3.909000e-01 1.535000e-01 2.003000e-01
+ 1.860000e-02 2.330000e-01 8.280000e-02 2.981000e-01 6.380000e-02 3.789000e-01 -1.660000e-02 4.184000e-01 1.610000e-01 2.707000e-01 8.580000e-02 2.988000e-01 3.820000e-02 1.848000e-01
+ 4.782000e-01 4.641000e-01 8.946000e-01 3.435000e-01 8.573000e-01 5.357000e-01 1.875700e+00 -1.691000e-01 -6.900000e-03 2.014400e+00 2.090000e-02 -2.310000e-02 4.434000e-01 3.007000e-01
+ 3.309000e-01 4.250000e-01 4.928000e-01 6.981000e-01 6.051000e-01 7.404000e-01 6.155000e-01 5.528000e-01 6.382000e-01 7.205000e-01 5.233000e-01 6.605000e-01 3.175000e-01 3.624000e-01
+ 2.536000e-01 1.080000e-01 5.554000e-01 1.227000e-01 7.170000e-01 7.060000e-02 1.696900e+00 4.170000e-02 -1.440000e-02 1.760000e-02 -1.016000e-01 1.216000e-01 3.275000e-01 4.920000e-02
+ 3.390000e-01 3.020000e-01 5.153000e-01 4.942000e-01 5.936000e-01 5.545000e-01 5.314000e-01 4.704000e-01 6.225000e-01 5.370000e-01 5.093000e-01 4.972000e-01 3.172000e-01 2.615000e-01
+ 1.167900e+00 -7.670000e-02 1.151900e+00 5.657000e-01 8.234000e-01 1.057900e+00 1.061100e+00 6.778000e-01 6.292000e-01 1.053300e+00 1.050300e+00 6.942000e-01 6.490000e-01 3.211000e-01
+ 4.128000e-01 6.886000e-01 7.720000e-01 6.333000e-01 8.191000e-01 7.276000e-01 1.696900e+00 4.170000e-02 -1.400000e-03 1.999100e+00 -2.770000e-02 1.763800e+00 4.395000e-01 3.880000e-01
+ 2.401000e-01 8.456000e-01 2.801000e-01 1.440500e+00 3.156000e-01 1.645200e+00 4.983000e-01 1.182000e+00 4.511000e-01 1.513000e+00 4.349000e-01 1.260300e+00 2.223000e-01 7.598000e-01
+ 3.190000e-02 2.327000e-01 1.329000e-01 4.503000e-01 1.820000e-01 5.382000e-01 1.320000e-01 1.579700e+00 6.880000e-02 -8.450000e-02 -1.080000e-02 1.390000e-02 8.020000e-02 2.422000e-01
+ 4.039000e-01 3.573000e-01 6.585000e-01 5.334000e-01 6.388000e-01 7.320000e-01 6.293000e-01 5.603000e-01 6.803000e-01 7.047000e-01 5.667000e-01 6.358000e-01 3.484000e-01 3.435000e-01
+ 1.027700e+00 8.760000e-02 5.360000e-01 1.174600e+00 3.294000e-01 7.892000e-01 3.724000e-01 1.366100e+00 2.953000e-01 7.905000e-01 4.678000e-01 1.253200e+00 3.132000e-01 4.753000e-01
+ 6.950000e-02 4.076000e-01 1.708000e-01 5.636000e-01 1.306000e-01 7.205000e-01 -1.290000e-02 7.861000e-01 1.415000e-01 7.203000e-01 6.850000e-02 6.867000e-01 5.100000e-02 3.835000e-01
+ 5.578000e-01 4.956000e-01 8.414000e-01 5.372000e-01 9.265000e-01 5.695000e-01 1.720000e+00 1.960000e-02 7.740000e-02 1.906200e+00 -9.720000e-02 1.850300e+00 4.662000e-01 3.445000e-01
+ 2.043000e-01 1.528000e-01 3.941000e-01 1.483000e-01 4.314000e-01 1.899000e-01 4.244000e-01 1.136000e-01 5.053000e-01 1.130000e-01 4.428000e-01 8.950000e-02 2.396000e-01 7.130000e-02
+ 3.115000e-01 6.206000e-01 5.196000e-01 9.349000e-01 4.160000e-01 1.275600e+00 4.773000e-01 9.806000e-01 5.755000e-01 1.103300e+00 3.650000e-01 1.116200e+00 2.662000e-01 5.800000e-01
+ 1.036700e+00 8.220000e-02 8.868000e-01 8.528000e-01 7.260000e-01 4.464000e-01 1.059900e+00 6.539000e-01 6.681000e-01 4.769000e-01 9.639000e-01 7.606000e-01 5.826000e-01 2.310000e-01
+ 1.069800e+00 4.080000e-02 4.507000e-01 2.553000e-01 3.520000e-01 2.347000e-01 4.403000e-01 2.715000e-01 3.898000e-01 1.776000e-01 4.880000e-01 2.127000e-01 3.393000e-01 1.490000e-01
+ 9.398000e-01 9.490000e-02 1.514900e+00 9.620000e-02 1.657200e+00 1.844000e-01 1.592300e+00 4.300000e-03 1.785300e+00 7.150000e-02 1.490300e+00 1.233000e-01 8.673000e-01 6.500000e-02
+ 2.916000e-01 8.154000e-01 2.496000e-01 7.557000e-01 7.570000e-02 7.302000e-01 2.293000e-01 7.797000e-01 1.223000e-01 6.540000e-01 1.639000e-01 8.590000e-01 1.455000e-01 4.776000e-01
+ 1.027000e-01 1.016100e+00 1.556000e-01 1.566700e+00 1.183000e-01 9.786000e-01 1.365000e-01 1.589500e+00 1.133000e-01 9.559000e-01 1.248000e-01 1.605400e+00 8.940000e-02 6.949000e-01
+ 1.103500e+00 3.500000e-03 5.619000e-01 1.930000e-01 4.270000e-01 1.961000e-01 5.009000e-01 2.624000e-01 4.615000e-01 1.391000e-01 5.809000e-01 1.712000e-01 3.970000e-01 1.167000e-01
+ 1.000000e-03 9.440000e-01 3.068000e-01 9.555000e-01 4.642000e-01 9.027000e-01 -1.152000e-01 1.865900e+00 -8.080000e-02 2.098800e+00 -8.500000e-03 1.020000e-02 1.939000e-01 5.414000e-01
+ 4.455000e-01 6.606000e-01 2.377000e-01 6.792000e-01 2.448000e-01 4.713000e-01 2.568000e-01 6.523000e-01 3.088000e-01 3.794000e-01 2.070000e-01 7.139000e-01 1.991000e-01 3.831000e-01
+ 3.431000e-01 3.872000e-01 5.262000e-01 6.181000e-01 5.925000e-01 7.169000e-01 6.176000e-01 5.152000e-01 6.126000e-01 7.107000e-01 4.996000e-01 6.554000e-01 3.275000e-01 3.312000e-01
+ 3.390000e-02 6.709000e-01 4.034000e-01 6.058000e-01 4.849000e-01 6.530000e-01 1.696000e+00 4.940000e-02 -7.460000e-02 2.088600e+00 -6.100000e-03 7.900000e-03 2.012000e-01 3.928000e-01
+-3.340000e-02 1.136700e+00 8.860000e-02 1.330500e+00 2.922000e-01 1.235300e+00 1.830000e-02 1.712800e+00 -8.160000e-02 2.100200e+00 3.500000e-03 1.726300e+00 7.930000e-02 7.498000e-01
+ 1.158900e+00 -6.460000e-02 4.903000e-01 1.257400e+00 5.872000e-01 4.286000e-01 5.796000e-01 1.149900e+00 5.293000e-01 4.681000e-01 5.713000e-01 1.158500e+00 4.171000e-01 3.362000e-01
+ 1.061400e+00 5.460000e-02 4.624000e-01 4.138000e-01 4.538000e-01 2.395000e-01 6.261000e-01 2.194000e-01 4.383000e-01 2.373000e-01 5.559000e-01 2.980000e-01 3.813000e-01 1.803000e-01
+-6.870000e-02 1.177000e+00 3.070000e-01 1.094200e+00 3.066000e-01 1.239500e+00 8.420000e-02 1.636600e+00 -2.100000e-03 1.999800e+00 -3.140000e-02 1.768700e+00 1.517000e-01 6.749000e-01
+ 4.358000e-01 5.687000e-01 7.750000e-01 5.402000e-01 9.538000e-01 4.687000e-01 1.750700e+00 -2.560000e-02 1.563000e-01 1.817000e+00 6.030000e-02 -7.160000e-02 4.426000e-01 3.335000e-01
+ 3.493000e-01 3.879000e-01 4.468000e-01 7.277000e-01 5.066000e-01 8.343000e-01 6.364000e-01 5.005000e-01 6.528000e-01 6.812000e-01 4.985000e-01 6.694000e-01 3.007000e-01 3.705000e-01
+ 3.451000e-01 3.304000e-01 4.110000e-01 6.678000e-01 4.958000e-01 7.328000e-01 4.974000e-01 5.712000e-01 4.849000e-01 7.611000e-01 3.385000e-01 7.543000e-01 2.568000e-01 3.634000e-01
+ 6.628000e-01 4.250000e-01 2.748000e-01 2.046000e-01 1.555000e-01 2.684000e-01 2.439000e-01 2.413000e-01 2.985000e-01 8.700000e-02 2.819000e-01 1.962000e-01 2.311000e-01 1.262000e-01
+ 5.381000e-01 2.008000e-01 9.185000e-01 2.281000e-01 1.066400e+00 2.308000e-01 9.062000e-01 2.368000e-01 9.814000e-01 3.535000e-01 8.856000e-01 2.679000e-01 5.252000e-01 1.371000e-01
+ 1.112900e+00 -9.100000e-03 8.964000e-01 8.369000e-01 6.871000e-01 7.348000e-01 8.873000e-01 8.529000e-01 7.308000e-01 6.121000e-01 8.880000e-01 8.495000e-01 5.774000e-01 3.174000e-01
+ 9.408000e-01 1.572000e-01 1.180300e+00 3.368000e-01 1.296200e+00 3.425000e-01 1.684700e+00 5.910000e-02 2.001600e+00 2.300000e-03 4.320000e-02 1.685900e+00 7.190000e-01 1.595000e-01
+ 1.581000e-01 3.713000e-01 3.274000e-01 4.837000e-01 2.731000e-01 6.740000e-01 4.187000e-01 3.731000e-01 3.721000e-01 5.650000e-01 3.401000e-01 4.673000e-01 1.892000e-01 2.795000e-01
+ 3.393000e-01 1.388000e-01 2.235000e-01 3.140000e-02 7.320000e-02 1.648000e-01 2.143000e-01 4.110000e-02 1.466000e-01 7.950000e-02 1.840000e-01 7.610000e-02 1.639000e-01 4.260000e-02
+ 2.449000e-01 1.485000e-01 1.748000e-01 3.990000e-02 7.900000e-02 1.229000e-01 1.639000e-01 5.250000e-02 1.908000e-01 -1.550000e-02 7.440000e-02 1.631000e-01 1.260000e-01 5.300000e-02
+ 3.539000e-01 7.224000e-01 4.408000e-01 1.274400e+00 6.796000e-01 1.237900e+00 4.989000e-01 1.199600e+00 6.023000e-01 1.362300e+00 4.924000e-01 1.209500e+00 2.847000e-01 6.989000e-01
+ 4.341000e-01 4.468000e-01 6.116000e-01 7.835000e-01 7.314000e-01 8.503000e-01 6.383000e-01 7.511000e-01 8.338000e-01 7.523000e-01 6.846000e-01 6.988000e-01 3.918000e-01 4.062000e-01
+ 5.380000e-01 4.697000e-01 7.407000e-01 8.628000e-01 7.885000e-01 1.048800e+00 7.124000e-01 8.969000e-01 9.390000e-01 9.041000e-01 7.319000e-01 8.754000e-01 4.184000e-01 5.099000e-01
+ 2.920000e-01 8.900000e-02 4.256000e-01 1.747000e-01 4.704000e-01 2.201000e-01 4.925000e-01 9.680000e-02 4.691000e-01 2.292000e-01 4.426000e-01 1.539000e-01 2.753000e-01 6.600000e-02
+ 4.122000e-01 8.940000e-02 5.834000e-01 2.571000e-01 6.954000e-01 2.638000e-01 1.677400e+00 6.480000e-02 -2.700000e-03 2.000000e-03 1.870000e-02 -2.020000e-02 3.243000e-01 1.595000e-01
+-5.000000e-04 1.120800e+00 6.350000e-02 1.665100e+00 -5.980000e-02 1.582700e+00 -7.100000e-03 1.746900e+00 8.840000e-02 1.332500e+00 1.032000e-01 1.617900e+00 3.190000e-02 8.864000e-01
+ 2.250000e-02 4.186000e-01 2.250000e-02 2.158000e-01 4.480000e-02 1.556000e-01 6.030000e-02 1.744000e-01 6.030000e-02 1.389000e-01 -2.940000e-02 2.825000e-01 1.240000e-02 1.834000e-01
+ 1.856000e-01 3.467000e-01 6.820000e-02 2.194000e-01 4.480000e-02 2.059000e-01 9.490000e-02 1.882000e-01 3.840000e-02 2.095000e-01 5.270000e-02 2.388000e-01 7.130000e-02 1.545000e-01
+ 5.003000e-01 2.536000e-01 1.001900e+00 1.310000e-01 1.054300e+00 2.531000e-01 8.935000e-01 2.608000e-01 1.017600e+00 3.172000e-01 9.822000e-01 1.566000e-01 5.335000e-01 1.295000e-01
+ 4.205000e-01 6.878000e-01 8.430000e-02 1.679500e+00 1.585000e-01 9.973000e-01 2.664000e-01 1.464800e+00 1.663000e-01 9.523000e-01 3.439000e-01 1.375300e+00 1.936000e-01 6.086000e-01
+ 7.230000e-01 2.897000e-01 1.061700e+00 5.399000e-01 1.291800e+00 5.118000e-01 1.153700e+00 4.307000e-01 1.208600e+00 6.427000e-01 9.571000e-01 6.620000e-01 6.198000e-01 3.023000e-01
+ 2.124000e-01 6.169000e-01 4.337000e-01 8.492000e-01 4.279000e-01 1.041800e+00 4.311000e-01 8.475000e-01 4.554000e-01 1.033700e+00 3.460000e-01 9.528000e-01 2.282000e-01 5.165000e-01
+ 1.087000e+00 2.050000e-02 3.814000e-01 9.184000e-01 3.311000e-01 6.046000e-01 3.696000e-01 9.346000e-01 3.396000e-01 5.683000e-01 4.533000e-01 8.328000e-01 3.132000e-01 3.887000e-01
+ 1.122500e+00 -2.000000e-02 7.035000e-01 1.028900e+00 5.001000e-01 6.363000e-01 7.524000e-01 9.642000e-01 5.688000e-01 5.181000e-01 7.309000e-01 9.958000e-01 4.889000e-01 3.028000e-01
+ 1.122700e+00 -2.120000e-02 7.345000e-01 1.126000e-01 5.620000e-01 1.254000e-01 7.846000e-01 5.580000e-02 5.239000e-01 1.548000e-01 7.256000e-01 1.218000e-01 4.972000e-01 5.870000e-02
+ 2.307000e-01 7.521000e-01 4.215000e-01 8.968000e-01 4.895000e-01 9.583000e-01 1.706200e+00 3.010000e-02 2.510000e-02 1.976100e+00 5.130000e-02 -5.990000e-02 2.203000e-01 5.587000e-01
+ 3.168000e-01 1.169000e-01 7.908000e-01 -7.260000e-02 8.958000e-01 -5.690000e-02 1.596400e+00 1.594000e-01 -4.350000e-02 5.030000e-02 -4.990000e-02 5.970000e-02 3.999000e-01 1.530000e-02
+ 1.098600e+00 1.060000e-02 1.170200e+00 5.680000e-01 8.732000e-01 2.272000e-01 1.142800e+00 5.946000e-01 8.219000e-01 2.528000e-01 1.195700e+00 5.319000e-01 6.724000e-01 1.097000e-01
+ 1.823000e-01 6.265000e-01 1.940000e-01 1.089600e+00 2.839000e-01 1.166500e+00 2.785000e-01 9.865000e-01 3.379000e-01 1.124600e+00 2.203000e-01 1.059100e+00 1.262000e-01 6.122000e-01
+ 4.150000e-02 7.618000e-01 2.864000e-01 8.324000e-01 4.110000e-01 8.252000e-01 4.500000e-03 1.725100e+00 -2.590000e-02 2.027700e+00 -2.960000e-02 3.420000e-02 1.374000e-01 5.252000e-01
+ 5.909000e-01 4.002000e-01 8.241000e-01 7.434000e-01 1.137400e+00 6.116000e-01 9.519000e-01 5.897000e-01 1.061400e+00 7.307000e-01 8.974000e-01 6.602000e-01 5.347000e-01 3.598000e-01
+ 7.907000e-01 2.081000e-01 1.288000e+00 2.727000e-01 1.441700e+00 3.342000e-01 1.260200e+00 3.046000e-01 1.501900e+00 2.953000e-01 1.235200e+00 3.337000e-01 7.069000e-01 1.993000e-01
+ 5.221000e-01 5.633000e-01 9.389000e-01 7.340000e-01 9.714000e-01 9.561000e-01 9.044000e-01 7.730000e-01 1.116400e+00 8.080000e-01 9.204000e-01 7.541000e-01 5.285000e-01 4.408000e-01
+ 1.143700e+00 -4.720000e-02 5.848000e-01 9.845000e-01 6.145000e-01 3.859000e-01 6.835000e-01 8.577000e-01 5.857000e-01 3.928000e-01 6.892000e-01 8.599000e-01 4.808000e-01 2.604000e-01
+ 1.652000e-01 5.220000e-01 7.564000e-01 1.959000e-01 7.352000e-01 3.619000e-01 1.798300e+00 -8.000000e-02 -6.110000e-02 2.072900e+00 -3.170000e-02 3.590000e-02 3.185000e-01 2.556000e-01
+ 2.429000e-01 8.192000e-01 5.522000e-01 8.254000e-01 6.584000e-01 8.397000e-01 1.758500e+00 -2.970000e-02 1.117000e-01 1.871200e+00 2.010000e-02 1.704900e+00 3.055000e-01 5.041000e-01
+ 1.027500e+00 9.290000e-02 7.770000e-01 9.647000e-01 7.027000e-01 6.231000e-01 8.090000e-01 9.281000e-01 7.108000e-01 5.548000e-01 9.417000e-01 7.737000e-01 5.562000e-01 3.125000e-01
+ 2.098000e-01 4.697000e-01 2.148000e-01 8.691000e-01 2.709000e-01 9.603000e-01 2.065000e-01 8.809000e-01 1.751000e-01 1.090800e+00 1.634000e-01 9.365000e-01 1.426000e-01 4.818000e-01
+ 8.182000e-01 2.860000e-01 1.228700e+00 2.121000e-01 1.275100e+00 2.980000e-01 1.731500e+00 2.500000e-03 2.029300e+00 -3.240000e-02 2.870000e-02 1.699900e+00 6.670000e-01 1.885000e-01
+ 1.416000e-01 9.664000e-01 7.030000e-02 8.976000e-01 1.443000e-01 5.991000e-01 4.980000e-02 9.250000e-01 1.007000e-01 6.328000e-01 6.130000e-02 9.041000e-01 7.600000e-02 5.271000e-01
+ 6.420000e-01 1.712000e-01 8.587000e-01 4.432000e-01 9.461000e-01 5.424000e-01 1.049100e+00 2.220000e-01 1.132600e+00 3.475000e-01 9.113000e-01 3.815000e-01 5.211000e-01 2.263000e-01
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/oilTst.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/oilTst.dat Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,503 @@
+ nin 12
+ nout 2
+ ndata 500
+ 6.448000e-01 4.650000e-01 3.659000e-01 8.761000e-01 2.214000e-01 7.023000e-01 3.702000e-01 8.750000e-01 2.981000e-01 5.870000e-01 3.130000e-01 9.374000e-01 2.574000e-01 4.349000e-01
+ 5.013000e-01 9.710000e-02 7.749000e-01 1.599000e-01 8.716000e-01 1.946000e-01 7.986000e-01 1.304000e-01 8.874000e-01 1.974000e-01 6.991000e-01 2.499000e-01 4.547000e-01 8.450000e-02
+ 3.908000e-01 4.842000e-01 5.148000e-01 8.738000e-01 6.925000e-01 8.681000e-01 5.985000e-01 7.700000e-01 5.325000e-01 1.079000e+00 6.069000e-01 7.554000e-01 3.284000e-01 4.656000e-01
+ 1.038400e+00 8.240000e-02 1.578400e+00 1.819000e-01 1.242100e+00 5.329000e-01 1.780600e+00 -5.680000e-02 1.239700e+00 3.690000e-01 1.656900e+00 8.240000e-02 8.512000e-01 1.109000e-01
+-9.100000e-03 6.477000e-01 3.510000e-02 9.081000e-01 3.810000e-02 1.031800e+00 6.780000e-02 1.656900e+00 3.950000e-02 1.950300e+00 -2.490000e-02 3.090000e-02 7.000000e-03 5.461000e-01
+ 6.458000e-01 2.965000e-01 9.246000e-01 3.379000e-01 1.054200e+00 3.263000e-01 1.751900e+00 -2.230000e-02 2.070800e+00 -7.860000e-02 -1.010000e-02 1.030000e-02 5.419000e-01 2.027000e-01
+ 4.951000e-01 5.899000e-01 9.069000e-01 7.700000e-01 8.367000e-01 1.101400e+00 7.952000e-01 9.014000e-01 9.185000e-01 1.039600e+00 7.024000e-01 1.008900e+00 4.378000e-01 5.457000e-01
+ 1.136300e+00 -3.530000e-02 3.838000e-01 2.079000e-01 3.385000e-01 1.536000e-01 4.569000e-01 1.199000e-01 2.972000e-01 1.929000e-01 3.505000e-01 2.441000e-01 3.074000e-01 1.153000e-01
+ 1.646000e-01 5.148000e-01 4.558000e-01 5.405000e-01 5.855000e-01 5.258000e-01 1.727000e+00 4.400000e-03 -1.250000e-02 2.019000e+00 3.960000e-02 -4.630000e-02 2.334000e-01 3.525000e-01
+ 2.028000e-01 3.866000e-01 2.659000e-01 6.712000e-01 3.368000e-01 7.250000e-01 3.495000e-01 5.704000e-01 3.358000e-01 7.372000e-01 2.509000e-01 6.834000e-01 1.608000e-01 3.771000e-01
+ 6.930000e-01 1.407000e-01 1.015300e+00 3.020000e-01 1.114100e+00 3.990000e-01 1.070600e+00 2.370000e-01 1.106800e+00 4.251000e-01 1.041400e+00 2.751000e-01 6.056000e-01 1.516000e-01
+ 5.753000e-01 5.257000e-01 8.693000e-01 5.906000e-01 8.992000e-01 6.983000e-01 1.670000e+00 7.510000e-02 1.942200e+00 6.400000e-02 -2.000000e-02 1.753100e+00 5.033000e-01 3.513000e-01
+ 8.400000e-03 5.567000e-01 2.480000e-02 8.560000e-01 1.377000e-01 8.414000e-01 8.530000e-02 7.866000e-01 -6.210000e-02 1.098600e+00 9.400000e-02 7.736000e-01 1.660000e-02 4.914000e-01
+ 1.122000e-01 2.212000e-01 2.767000e-01 2.258000e-01 4.065000e-01 1.461000e-01 2.236000e-01 2.891000e-01 2.271000e-01 3.709000e-01 2.522000e-01 2.523000e-01 1.470000e-01 1.456000e-01
+ 2.765000e-01 8.222000e-01 6.055000e-01 9.075000e-01 7.480000e-01 8.832000e-01 1.749700e+00 -2.060000e-02 4.540000e-02 1.950100e+00 4.800000e-02 1.672300e+00 3.080000e-01 5.781000e-01
+ 3.000000e-01 2.843000e-01 5.036000e-01 4.081000e-01 6.104000e-01 4.229000e-01 4.810000e-01 4.380000e-01 5.600000e-01 5.012000e-01 4.791000e-01 4.364000e-01 2.898000e-01 2.373000e-01
+ 1.142700e+00 -4.400000e-02 8.870000e-01 1.008000e-01 7.355000e-01 2.940000e-02 9.660000e-01 8.200000e-03 6.765000e-01 7.800000e-02 1.012500e+00 -4.360000e-02 5.768000e-01 3.400000e-02
+ 2.788000e-01 1.947000e-01 4.153000e-01 3.330000e-01 4.493000e-01 4.033000e-01 3.633000e-01 3.906000e-01 4.544000e-01 4.106000e-01 3.182000e-01 4.397000e-01 2.318000e-01 1.993000e-01
+ 4.448000e-01 6.422000e-01 8.192000e-01 5.634000e-01 8.853000e-01 6.292000e-01 1.754600e+00 -2.460000e-02 2.870000e-02 1.963900e+00 -1.710000e-02 1.754600e+00 4.858000e-01 3.275000e-01
+ 1.169600e+00 -7.950000e-02 6.595000e-01 3.301000e-01 5.647000e-01 2.009000e-01 7.108000e-01 2.644000e-01 4.990000e-01 2.600000e-01 6.660000e-01 3.246000e-01 4.636000e-01 1.476000e-01
+ 2.800000e-02 4.749000e-01 3.082000e-01 5.165000e-01 4.995000e-01 4.292000e-01 -6.680000e-02 1.809500e+00 -5.310000e-02 6.340000e-02 7.020000e-02 -8.250000e-02 1.794000e-01 2.888000e-01
+-2.050000e-02 1.080400e+00 1.990000e-01 1.171300e+00 2.044000e-01 1.301100e+00 -6.300000e-02 1.807000e+00 -1.090000e-01 2.131100e+00 9.110000e-02 1.625400e+00 1.084000e-01 6.975000e-01
+-1.990000e-02 1.129400e+00 6.400000e-03 1.475300e+00 1.195000e-01 1.479400e+00 -8.570000e-02 1.833300e+00 -5.250000e-02 2.061500e+00 8.600000e-02 1.630200e+00 1.760000e-02 8.479000e-01
+ 7.734000e-01 3.108000e-01 1.078300e+00 6.418000e-01 1.267100e+00 6.887000e-01 1.212800e+00 4.844000e-01 1.260900e+00 7.241000e-01 1.197800e+00 5.027000e-01 6.789000e-01 3.063000e-01
+ 4.740000e-01 6.379000e-01 2.710000e-01 7.720000e-01 2.148000e-01 5.921000e-01 2.197000e-01 8.294000e-01 2.198000e-01 5.664000e-01 2.387000e-01 8.073000e-01 2.159000e-01 4.153000e-01
+ 1.412000e-01 9.814000e-01 1.210000e-01 5.891000e-01 5.280000e-02 5.381000e-01 1.400000e-01 5.603000e-01 1.442000e-01 4.171000e-01 1.087000e-01 6.031000e-01 1.059000e-01 3.814000e-01
+ 8.093000e-01 1.447000e-01 1.019800e+00 2.690000e-01 1.202300e+00 2.014000e-01 1.642000e+00 1.033000e-01 2.117700e+00 -1.391000e-01 -8.490000e-02 1.015000e-01 6.179000e-01 1.411000e-01
+ 6.051000e-01 3.129000e-01 1.013600e+00 4.145000e-01 1.062800e+00 5.765000e-01 9.327000e-01 5.089000e-01 1.094600e+00 5.687000e-01 9.348000e-01 5.066000e-01 5.441000e-01 2.873000e-01
+ 1.044400e+00 7.250000e-02 7.841000e-01 6.701000e-01 5.830000e-01 4.092000e-01 7.340000e-01 7.407000e-01 4.950000e-01 4.886000e-01 7.533000e-01 7.148000e-01 4.912000e-01 2.421000e-01
+ 6.847000e-01 3.897000e-01 9.817000e-01 4.101000e-01 1.045500e+00 4.734000e-01 1.734700e+00 -3.000000e-04 1.986600e+00 1.320000e-02 5.550000e-02 1.665800e+00 5.296000e-01 2.952000e-01
+ 1.740000e-02 8.344000e-01 -7.070000e-02 1.251500e+00 4.490000e-02 1.250200e+00 -8.910000e-02 1.834800e+00 3.770000e-02 1.952000e+00 1.850000e-02 -2.100000e-02 2.190000e-02 6.670000e-01
+ 2.622000e-01 8.393000e-01 1.888000e-01 7.998000e-01 2.062000e-01 5.559000e-01 2.330000e-01 7.465000e-01 1.065000e-01 6.558000e-01 1.107000e-01 8.962000e-01 1.222000e-01 4.926000e-01
+ 6.600000e-03 1.097000e+00 4.751000e-01 9.095000e-01 5.517000e-01 9.640000e-01 1.777900e+00 -5.290000e-02 -2.350000e-02 2.028300e+00 5.520000e-02 1.666900e+00 2.132000e-01 6.103000e-01
+ 4.735000e-01 2.950000e-02 6.149000e-01 2.003000e-01 7.815000e-01 1.288000e-01 6.322000e-01 1.794000e-01 6.874000e-01 2.575000e-01 6.046000e-01 2.099000e-01 3.779000e-01 8.710000e-02
+ 6.480000e-02 5.514000e-01 3.730000e-02 9.392000e-01 1.505000e-01 9.401000e-01 2.350000e-02 9.564000e-01 1.424000e-01 9.696000e-01 3.140000e-02 9.441000e-01 3.910000e-02 5.217000e-01
+ 1.271900e+00 -1.940000e-01 4.195000e-01 1.021300e+00 3.379000e-01 6.556000e-01 4.200000e-01 1.027200e+00 3.894000e-01 5.664000e-01 4.430000e-01 1.001100e+00 3.273000e-01 4.017000e-01
+ 3.084000e-01 7.897000e-01 1.522000e-01 1.576900e+00 7.180000e-02 9.782000e-01 2.220000e-01 1.498500e+00 4.430000e-02 9.810000e-01 9.340000e-02 1.646600e+00 1.257000e-01 6.302000e-01
+ 5.818000e-01 2.661000e-01 8.678000e-01 4.688000e-01 9.546000e-01 5.728000e-01 7.304000e-01 6.334000e-01 8.540000e-01 7.134000e-01 8.607000e-01 4.792000e-01 5.011000e-01 2.696000e-01
+ 5.940000e-02 7.090000e-01 4.563000e-01 6.108000e-01 5.679000e-01 6.220000e-01 1.778700e+00 -5.690000e-02 3.970000e-02 1.952300e+00 3.590000e-02 -4.270000e-02 2.095000e-01 4.262000e-01
+ 5.933000e-01 2.304000e-01 9.529000e-01 3.319000e-01 1.151400e+00 2.991000e-01 9.822000e-01 3.042000e-01 1.184000e+00 2.901000e-01 9.725000e-01 3.120000e-01 5.705000e-01 1.696000e-01
+ 8.719000e-01 2.312000e-01 4.310000e-01 5.507000e-01 3.146000e-01 4.567000e-01 4.258000e-01 5.563000e-01 3.740000e-01 3.700000e-01 3.330000e-01 6.646000e-01 2.901000e-01 3.226000e-01
+ 1.360000e-02 9.961000e-01 2.466000e-01 1.077500e+00 3.379000e-01 1.112000e+00 5.620000e-02 1.662300e+00 -4.080000e-02 2.046100e+00 -1.250000e-02 1.610000e-02 1.257000e-01 6.558000e-01
+ 1.218000e+00 -1.338000e-01 6.016000e-01 1.136800e+00 4.434000e-01 7.737000e-01 6.493000e-01 1.074600e+00 5.333000e-01 6.233000e-01 5.916000e-01 1.146600e+00 4.410000e-01 3.835000e-01
+ 7.636000e-01 1.110000e-01 1.172100e+00 4.000000e-04 1.307500e+00 -1.810000e-02 1.756100e+00 -3.150000e-02 1.876900e+00 1.464000e-01 4.910000e-02 -5.820000e-02 6.767000e-01 1.780000e-02
+ 4.733000e-01 5.533000e-01 7.878000e-01 5.479000e-01 9.136000e-01 5.419000e-01 1.809900e+00 -8.920000e-02 -7.870000e-02 2.088800e+00 2.570000e-02 -3.140000e-02 4.780000e-01 3.072000e-01
+ 2.005000e-01 5.829000e-01 6.213000e-01 4.544000e-01 6.185000e-01 6.031000e-01 1.674800e+00 6.370000e-02 4.690000e-02 1.944500e+00 4.540000e-02 -5.330000e-02 2.894000e-01 3.546000e-01
+ 2.873000e-01 3.904000e-01 5.542000e-01 4.862000e-01 6.085000e-01 5.771000e-01 6.228000e-01 4.063000e-01 6.241000e-01 5.769000e-01 5.590000e-01 4.789000e-01 3.226000e-01 2.772000e-01
+ 1.065800e+00 4.890000e-02 9.754000e-01 2.339000e-01 7.418000e-01 1.505000e-01 8.248000e-01 4.087000e-01 6.896000e-01 1.874000e-01 8.415000e-01 3.902000e-01 5.896000e-01 9.380000e-02
+ 2.749000e-01 8.322000e-01 7.550000e-02 8.470000e-01 1.179000e-01 6.081000e-01 2.079000e-01 6.938000e-01 1.133000e-01 5.986000e-01 1.856000e-01 7.187000e-01 1.310000e-01 4.502000e-01
+ 1.160400e+00 -7.070000e-02 3.873000e-01 4.790000e-01 3.702000e-01 3.135000e-01 3.731000e-01 4.934000e-01 4.204000e-01 2.415000e-01 3.768000e-01 4.884000e-01 3.431000e-01 2.132000e-01
+ 7.850000e-01 3.097000e-01 1.039200e+00 6.899000e-01 1.007400e+00 9.179000e-01 1.606700e+00 1.494000e-01 2.023600e+00 -2.750000e-02 7.740000e-02 1.643000e+00 6.075000e-01 3.769000e-01
+ 4.767000e-01 6.200000e-01 8.124000e-01 6.201000e-01 8.201000e-01 7.476000e-01 1.762100e+00 -3.200000e-02 2.850000e-02 1.962500e+00 -2.400000e-03 1.735200e+00 4.170000e-01 4.287000e-01
+ 7.000000e-02 5.804000e-01 3.603000e-01 6.051000e-01 5.032000e-01 5.804000e-01 1.808000e+00 -9.050000e-02 2.450000e-02 1.971800e+00 5.380000e-02 -6.290000e-02 2.108000e-01 3.495000e-01
+ 2.398000e-01 5.381000e-01 4.280000e-01 7.827000e-01 4.594000e-01 9.249000e-01 4.543000e-01 7.498000e-01 4.837000e-01 9.178000e-01 3.558000e-01 8.688000e-01 2.354000e-01 4.663000e-01
+ 6.772000e-01 4.153000e-01 8.702000e-01 6.673000e-01 1.097600e+00 5.371000e-01 1.671400e+00 7.230000e-02 1.913400e+00 1.031000e-01 4.360000e-02 1.682100e+00 5.430000e-01 3.427000e-01
+ 5.771000e-01 2.690000e-01 9.626000e-01 1.865000e-01 9.748000e-01 3.090000e-01 1.720800e+00 1.200000e-02 1.904000e+00 1.157000e-01 4.180000e-02 -5.090000e-02 5.076000e-01 1.776000e-01
+ 9.678000e-01 1.205000e-01 4.169000e-01 7.365000e-01 2.957000e-01 5.802000e-01 4.317000e-01 7.145000e-01 3.142000e-01 5.355000e-01 3.243000e-01 8.496000e-01 2.935000e-01 3.748000e-01
+ 3.039000e-01 8.250000e-02 5.032000e-01 1.001000e-01 6.313000e-01 4.200000e-02 5.443000e-01 4.810000e-02 6.187000e-01 7.040000e-02 5.802000e-01 5.800000e-03 3.000000e-01 4.560000e-02
+ 1.066000e+00 4.740000e-02 8.390000e-01 3.301000e-01 6.501000e-01 2.295000e-01 8.427000e-01 3.239000e-01 6.770000e-01 1.715000e-01 7.632000e-01 4.213000e-01 5.597000e-01 1.096000e-01
+ 4.445000e-01 4.328000e-01 7.354000e-01 6.306000e-01 7.351000e-01 8.423000e-01 7.142000e-01 6.597000e-01 8.077000e-01 7.814000e-01 6.867000e-01 6.933000e-01 3.992000e-01 3.965000e-01
+ 1.113800e+00 -1.200000e-02 4.509000e-01 1.285400e+00 4.255000e-01 1.115600e+00 4.581000e-01 1.271400e+00 3.918000e-01 1.065600e+00 4.900000e-01 1.234000e+00 3.392000e-01 5.883000e-01
+ 3.929000e-01 1.168000e-01 6.146000e-01 2.238000e-01 8.554000e-01 8.000000e-02 1.677700e+00 6.240000e-02 -6.710000e-02 7.830000e-02 3.840000e-02 -4.460000e-02 3.705000e-01 1.066000e-01
+ 1.174900e+00 -7.820000e-02 5.318000e-01 1.208200e+00 4.090000e-01 8.032000e-01 6.121000e-01 1.106000e+00 4.824000e-01 6.767000e-01 5.083000e-01 1.234000e+00 3.942000e-01 4.315000e-01
+ 1.112500e+00 -1.900000e-03 7.953000e-01 4.182000e-01 5.772000e-01 3.317000e-01 8.666000e-01 3.376000e-01 6.822000e-01 1.837000e-01 7.594000e-01 4.607000e-01 5.409000e-01 1.411000e-01
+ 1.064300e+00 4.830000e-02 4.031000e-01 4.715000e-01 4.217000e-01 2.610000e-01 5.285000e-01 3.166000e-01 4.123000e-01 2.549000e-01 4.966000e-01 3.544000e-01 3.839000e-01 1.711000e-01
+ 1.462000e-01 1.741000e-01 3.355000e-01 3.210000e-01 5.901000e-01 1.645000e-01 1.819900e+00 -1.075000e-01 4.480000e-02 -5.060000e-02 -1.540000e-02 1.900000e-02 1.997000e-01 1.640000e-01
+ 1.082600e+00 2.900000e-02 5.759000e-01 1.143600e+00 3.587000e-01 1.001100e+00 3.984000e-01 1.351900e+00 3.697000e-01 9.311000e-01 4.609000e-01 1.280500e+00 3.813000e-01 4.929000e-01
+ 1.749000e-01 9.337000e-01 4.971000e-01 9.799000e-01 5.158000e-01 1.106500e+00 1.858000e+00 -1.477000e-01 5.880000e-02 1.932100e+00 -9.480000e-02 1.840300e+00 2.799000e-01 5.831000e-01
+ 2.320000e-02 4.472000e-01 2.880000e-02 7.578000e-01 1.640000e-02 8.930000e-01 -2.840000e-02 1.768200e+00 -4.300000e-03 5.000000e-03 1.630000e-02 -1.920000e-02 1.400000e-03 4.502000e-01
+-1.890000e-02 9.049000e-01 2.733000e-01 9.240000e-01 4.054000e-01 9.117000e-01 -1.630000e-02 1.749400e+00 8.600000e-03 1.989800e+00 1.650000e-02 -1.970000e-02 1.497000e-01 5.558000e-01
+ 3.017000e-01 1.810000e-02 5.904000e-01 5.250000e-02 8.312000e-01 -9.490000e-02 1.793900e+00 -7.140000e-02 2.200000e-03 -2.400000e-03 1.700000e-03 -1.600000e-03 3.347000e-01 2.090000e-02
+ 3.039000e-01 3.121000e-01 6.046000e-01 3.240000e-01 8.146000e-01 2.188000e-01 1.698900e+00 4.070000e-02 -9.370000e-02 2.104700e+00 -1.610000e-02 2.060000e-02 3.003000e-01 2.485000e-01
+ 1.249000e-01 9.827000e-01 6.260000e-02 1.675200e+00 8.900000e-02 9.563000e-01 1.085000e-01 1.615700e+00 5.530000e-02 9.614000e-01 3.070000e-02 1.713800e+00 7.130000e-02 6.857000e-01
+ 1.155700e+00 -5.860000e-02 3.913000e-01 1.361500e+00 4.194000e-01 8.176000e-01 4.429000e-01 1.295800e+00 4.218000e-01 7.674000e-01 4.723000e-01 1.270600e+00 3.738000e-01 4.626000e-01
+ 2.459000e-01 7.784000e-01 5.481000e-01 7.902000e-01 6.902000e-01 7.715000e-01 1.765200e+00 -3.940000e-02 -1.560000e-02 2.020000e+00 4.010000e-02 -4.900000e-02 3.023000e-01 4.882000e-01
+ 6.220000e-01 4.951000e-01 3.017000e-01 7.115000e-01 2.143000e-01 5.816000e-01 2.724000e-01 7.436000e-01 2.240000e-01 5.504000e-01 3.288000e-01 6.809000e-01 2.659000e-01 3.523000e-01
+ 3.249000e-01 2.344000e-01 5.291000e-01 3.440000e-01 5.390000e-01 4.644000e-01 6.531000e-01 1.941000e-01 5.935000e-01 4.141000e-01 5.112000e-01 3.632000e-01 3.074000e-01 1.955000e-01
+ 3.186000e-01 6.189000e-01 7.353000e-01 5.008000e-01 7.807000e-01 5.874000e-01 1.759200e+00 -3.670000e-02 1.700000e-02 1.983200e+00 -3.570000e-02 4.160000e-02 3.922000e-01 3.411000e-01
+ 8.110000e-02 5.730000e-01 -5.400000e-03 1.059500e+00 4.620000e-02 1.141800e+00 9.510000e-02 9.377000e-01 4.330000e-02 1.157600e+00 4.870000e-02 9.916000e-01 3.510000e-02 5.644000e-01
+ 2.594000e-01 3.892000e-01 5.220000e-01 4.511000e-01 6.600000e-01 4.274000e-01 1.716700e+00 1.820000e-02 -2.340000e-02 2.023900e+00 2.910000e-02 -3.230000e-02 3.178000e-01 2.422000e-01
+ 5.516000e-01 4.323000e-01 9.537000e-01 3.289000e-01 1.019900e+00 3.933000e-01 1.765300e+00 -3.410000e-02 2.026400e+00 -2.980000e-02 1.780000e-02 -2.150000e-02 5.026000e-01 2.628000e-01
+ 1.111500e+00 -6.900000e-03 5.701000e-01 1.157500e+00 4.381000e-01 1.535200e+00 4.560000e-01 1.290200e+00 4.360000e-01 1.382700e+00 5.761000e-01 1.156000e+00 4.032000e-01 5.885000e-01
+ 4.961000e-01 1.890000e-01 7.077000e-01 3.780000e-01 8.067000e-01 4.309000e-01 7.211000e-01 3.645000e-01 8.028000e-01 4.556000e-01 7.352000e-01 3.442000e-01 3.970000e-01 2.335000e-01
+ 1.133100e+00 -3.640000e-02 4.325000e-01 4.307000e-01 3.780000e-01 3.109000e-01 4.364000e-01 4.285000e-01 3.326000e-01 3.505000e-01 4.787000e-01 3.815000e-01 3.200000e-01 2.438000e-01
+ 4.649000e-01 6.439000e-01 7.775000e-01 6.826000e-01 9.982000e-01 5.638000e-01 1.757300e+00 -2.250000e-02 -1.580000e-02 2.011300e+00 4.970000e-02 1.671300e+00 4.476000e-01 4.080000e-01
+ 5.782000e-01 6.300000e-03 9.617000e-01 -6.070000e-02 1.034800e+00 4.300000e-03 9.053000e-01 7.600000e-03 1.051300e+00 2.400000e-03 1.007700e+00 -1.158000e-01 5.201000e-01 7.200000e-03
+ 7.918000e-01 3.077000e-01 5.134000e-01 9.919000e-01 2.700000e-01 7.473000e-01 4.521000e-01 1.058100e+00 2.535000e-01 7.352000e-01 4.208000e-01 1.096100e+00 2.795000e-01 4.604000e-01
+ 5.928000e-01 5.222000e-01 1.003600e+00 4.579000e-01 1.078100e+00 5.070000e-01 1.647000e+00 9.860000e-02 2.106100e+00 -1.167000e-01 3.710000e-02 1.683300e+00 5.517000e-01 3.079000e-01
+ 5.960000e-02 7.629000e-01 3.239000e-01 8.181000e-01 4.259000e-01 8.440000e-01 1.670200e+00 7.190000e-02 -2.630000e-02 2.035400e+00 9.080000e-02 -1.085000e-01 1.982000e-01 4.708000e-01
+ 1.049800e+00 6.410000e-02 5.919000e-01 2.174000e-01 3.979000e-01 2.718000e-01 4.788000e-01 3.486000e-01 4.119000e-01 2.361000e-01 5.073000e-01 3.169000e-01 3.661000e-01 1.799000e-01
+ 1.155500e+00 -5.990000e-02 1.190600e+00 2.981000e-01 9.963000e-01 -1.430000e-02 1.078900e+00 4.294000e-01 8.709000e-01 1.026000e-01 1.261800e+00 2.111000e-01 6.923000e-01 4.150000e-02
+ 6.215000e-01 2.120000e-01 1.045100e+00 2.494000e-01 1.139400e+00 3.451000e-01 1.084900e+00 1.967000e-01 1.175900e+00 3.235000e-01 1.139200e+00 1.358000e-01 6.034000e-01 1.435000e-01
+ 2.349000e-01 1.442000e-01 2.265000e-01 3.929000e-01 2.736000e-01 4.296000e-01 2.931000e-01 3.112000e-01 2.363000e-01 4.802000e-01 2.454000e-01 3.726000e-01 1.406000e-01 2.151000e-01
+ 3.019000e-01 8.005000e-01 2.399000e-01 1.477400e+00 1.681000e-01 1.178700e+00 1.256000e-01 1.612900e+00 1.998000e-01 1.085000e+00 1.193000e-01 1.621700e+00 1.418000e-01 7.335000e-01
+ 1.147600e+00 -4.770000e-02 6.603000e-01 5.889000e-01 4.982000e-01 4.222000e-01 6.575000e-01 5.907000e-01 5.041000e-01 3.874000e-01 5.851000e-01 6.789000e-01 4.479000e-01 2.445000e-01
+ 1.101200e+00 7.300000e-03 8.426000e-01 1.042000e-01 6.891000e-01 5.540000e-02 9.469000e-01 -1.900000e-02 7.682000e-01 -6.170000e-02 8.742000e-01 6.580000e-02 5.727000e-01 2.010000e-02
+-4.210000e-02 6.206000e-01 -8.900000e-03 9.080000e-01 4.440000e-02 9.645000e-01 2.560000e-02 8.678000e-01 3.390000e-02 9.927000e-01 1.381000e-01 7.345000e-01 9.100000e-03 5.054000e-01
+ 3.173000e-01 1.439000e-01 4.652000e-01 2.672000e-01 4.718000e-01 3.732000e-01 4.224000e-01 3.186000e-01 5.422000e-01 2.998000e-01 4.661000e-01 2.660000e-01 2.607000e-01 1.635000e-01
+ 3.098000e-01 5.266000e-01 7.139000e-01 4.210000e-01 7.526000e-01 5.123000e-01 1.844200e+00 -1.276000e-01 4.160000e-02 1.950200e+00 3.560000e-02 -4.230000e-02 3.870000e-01 2.836000e-01
+ 1.113300e+00 -1.020000e-02 6.586000e-01 3.186000e-01 5.528000e-01 2.098000e-01 6.548000e-01 3.203000e-01 4.692000e-01 2.883000e-01 5.417000e-01 4.562000e-01 4.591000e-01 1.485000e-01
+ 1.120500e+00 -1.250000e-02 4.273000e-01 1.305000e+00 4.001000e-01 1.569000e+00 5.607000e-01 1.152400e+00 4.132000e-01 1.340800e+00 3.841000e-01 1.354200e+00 3.303000e-01 6.559000e-01
+ 6.990000e-01 3.968000e-01 1.746000e-01 6.364000e-01 2.241000e-01 4.211000e-01 3.194000e-01 4.643000e-01 2.155000e-01 4.168000e-01 4.041000e-01 3.630000e-01 2.536000e-01 2.708000e-01
+ 4.087000e-01 1.144000e-01 6.518000e-01 1.959000e-01 7.540000e-01 2.240000e-01 1.739300e+00 -8.900000e-03 4.300000e-03 -5.200000e-03 -2.820000e-02 3.580000e-02 3.589000e-01 1.307000e-01
+ 8.990000e-02 4.962000e-01 1.670000e-01 7.478000e-01 1.831000e-01 8.579000e-01 1.603000e-01 7.559000e-01 1.481000e-01 9.170000e-01 1.539000e-01 7.605000e-01 6.710000e-02 4.654000e-01
+ 7.275000e-01 8.800000e-02 1.026300e+00 2.773000e-01 1.203800e+00 2.712000e-01 1.026200e+00 2.689000e-01 1.253600e+00 2.343000e-01 9.783000e-01 3.308000e-01 5.823000e-01 1.706000e-01
+ 5.495000e-01 5.437000e-01 2.686000e-01 7.107000e-01 1.793000e-01 5.921000e-01 2.232000e-01 7.645000e-01 2.558000e-01 4.846000e-01 3.518000e-01 6.074000e-01 2.065000e-01 4.018000e-01
+ 5.979000e-01 2.631000e-01 9.246000e-01 2.454000e-01 1.040700e+00 2.497000e-01 1.743200e+00 -1.210000e-02 2.035700e+00 -4.280000e-02 5.510000e-02 -6.860000e-02 5.446000e-01 1.450000e-01
+ 4.053000e-01 3.193000e-01 7.128000e-01 4.065000e-01 6.600000e-01 6.450000e-01 6.435000e-01 4.922000e-01 7.326000e-01 5.784000e-01 6.610000e-01 4.686000e-01 3.683000e-01 2.864000e-01
+ 3.101000e-01 7.882000e-01 2.058000e-01 6.524000e-01 1.578000e-01 5.337000e-01 1.578000e-01 7.100000e-01 1.407000e-01 5.390000e-01 1.659000e-01 7.029000e-01 1.362000e-01 4.268000e-01
+ 3.448000e-01 7.733000e-01 2.226000e-01 3.745000e-01 1.856000e-01 3.102000e-01 2.002000e-01 3.992000e-01 2.583000e-01 2.151000e-01 1.775000e-01 4.266000e-01 1.738000e-01 2.538000e-01
+ 1.163000e-01 6.281000e-01 6.240000e-02 1.128500e+00 1.577000e-01 1.181700e+00 8.570000e-02 1.103700e+00 1.171000e-01 1.252400e+00 2.270000e-01 9.335000e-01 7.730000e-02 6.036000e-01
+ 1.841000e-01 9.089000e-01 9.620000e-02 1.631700e+00 3.060000e-02 1.153200e+00 4.420000e-02 1.694200e+00 1.440000e-02 1.136600e+00 9.730000e-02 1.630200e+00 6.780000e-02 7.479000e-01
+ 2.871000e-01 4.391000e-01 5.545000e-01 5.586000e-01 5.610000e-01 7.182000e-01 6.155000e-01 4.853000e-01 6.982000e-01 5.763000e-01 5.284000e-01 5.929000e-01 3.306000e-01 3.102000e-01
+ 6.266000e-01 4.869000e-01 8.659000e-01 8.900000e-01 1.030200e+00 9.553000e-01 1.955700e+00 -2.583000e-01 1.954300e+00 5.250000e-02 1.870000e-02 1.709700e+00 5.739000e-01 4.255000e-01
+ 2.521000e-01 5.208000e-01 4.874000e-01 6.131000e-01 7.096000e-01 4.927000e-01 1.693000e+00 4.670000e-02 2.250000e-02 1.967700e+00 1.410000e-02 -1.790000e-02 2.880000e-01 3.549000e-01
+ 1.115700e+00 -1.130000e-02 5.825000e-01 6.135000e-01 6.044000e-01 2.598000e-01 5.369000e-01 6.677000e-01 5.063000e-01 3.521000e-01 5.327000e-01 6.701000e-01 4.238000e-01 2.538000e-01
+ 4.343000e-01 2.892000e-01 6.943000e-01 4.379000e-01 8.366000e-01 4.458000e-01 8.259000e-01 2.838000e-01 8.670000e-01 4.263000e-01 7.693000e-01 3.494000e-01 4.452000e-01 2.014000e-01
+ 3.767000e-01 6.610000e-01 7.348000e-01 6.040000e-01 9.230000e-01 5.251000e-01 1.740300e+00 -4.500000e-03 -4.600000e-02 2.054200e+00 2.160000e-02 -2.470000e-02 4.012000e-01 3.929000e-01
+ 1.776000e-01 9.447000e-01 6.292000e-01 9.101000e-01 7.055000e-01 9.575000e-01 1.797300e+00 -8.050000e-02 -8.980000e-02 2.111000e+00 -2.000000e-03 1.733600e+00 3.322000e-01 5.643000e-01
+ 2.923000e-01 5.579000e-01 3.704000e-01 9.829000e-01 3.558000e-01 1.191800e+00 3.351000e-01 1.025100e+00 3.176000e-01 1.266500e+00 3.619000e-01 9.876000e-01 1.767000e-01 6.106000e-01
+ 1.100500e+00 6.600000e-03 4.421000e-01 8.560000e-01 3.823000e-01 5.487000e-01 4.909000e-01 7.966000e-01 3.241000e-01 5.907000e-01 5.004000e-01 7.838000e-01 3.299000e-01 3.734000e-01
+ 1.048000e+00 6.760000e-02 7.788000e-01 9.475000e-01 6.592000e-01 1.156800e+00 7.752000e-01 9.544000e-01 6.585000e-01 9.674000e-01 7.535000e-01 9.787000e-01 5.198000e-01 4.468000e-01
+ 6.970000e-02 9.352000e-01 2.593000e-01 1.061000e+00 3.419000e-01 1.108000e+00 7.300000e-03 1.719100e+00 -7.600000e-03 2.004800e+00 3.520000e-02 -4.400000e-02 1.164000e-01 6.692000e-01
+ 3.260000e-01 2.160000e-01 7.528000e-01 7.900000e-02 7.924000e-01 1.742000e-01 1.687000e+00 5.020000e-02 -3.940000e-02 4.670000e-02 -6.030000e-02 7.090000e-02 4.082000e-01 7.520000e-02
+ 1.050000e-01 6.663000e-01 3.348000e-01 7.601000e-01 5.875000e-01 6.110000e-01 1.713200e+00 2.610000e-02 1.228000e-01 1.855500e+00 -2.320000e-02 2.860000e-02 2.201000e-01 4.189000e-01
+ 1.002700e+00 1.252000e-01 6.166000e-01 4.814000e-01 4.798000e-01 3.617000e-01 7.088000e-01 3.755000e-01 5.636000e-01 2.377000e-01 6.239000e-01 4.772000e-01 4.345000e-01 2.159000e-01
+ 4.496000e-01 2.169000e-01 6.392000e-01 3.630000e-01 8.200000e-01 2.899000e-01 1.770400e+00 -4.520000e-02 -1.100000e-02 2.011200e+00 1.760000e-02 -1.910000e-02 4.239000e-01 1.501000e-01
+ 4.926000e-01 6.137000e-01 3.186000e-01 1.404600e+00 2.477000e-01 1.260500e+00 1.928000e-01 1.552100e+00 2.102000e-01 1.225200e+00 2.542000e-01 1.481800e+00 2.139000e-01 7.059000e-01
+ 1.138700e+00 -4.270000e-02 8.879000e-01 1.301000e-01 6.693000e-01 1.269000e-01 8.105000e-01 2.219000e-01 6.306000e-01 1.497000e-01 8.711000e-01 1.517000e-01 5.312000e-01 9.750000e-02
+ 9.711000e-01 1.022000e-01 1.494600e+00 2.016000e-01 1.763500e+00 1.583000e-01 1.554200e+00 1.290000e-01 1.834300e+00 1.014000e-01 1.488900e+00 2.066000e-01 8.892000e-01 8.380000e-02
+ 2.105000e-01 1.522000e-01 5.753000e-01 9.360000e-02 6.696000e-01 1.234000e-01 1.763000e+00 -3.660000e-02 4.100000e-03 -4.000000e-03 -1.860000e-02 2.320000e-02 3.220000e-01 5.130000e-02
+ 5.364000e-01 5.697000e-01 2.754000e-01 1.459900e+00 2.340000e-01 9.770000e-01 2.480000e-01 1.489200e+00 3.099000e-01 8.484000e-01 2.989000e-01 1.428300e+00 2.200000e-01 6.076000e-01
+ 1.165300e+00 -7.410000e-02 4.147000e-01 6.831000e-01 3.322000e-01 5.073000e-01 4.404000e-01 6.515000e-01 3.882000e-01 4.203000e-01 3.910000e-01 7.064000e-01 3.222000e-01 3.256000e-01
+ 5.929000e-01 4.753000e-01 7.672000e-01 9.424000e-01 8.572000e-01 1.086400e+00 7.978000e-01 8.958000e-01 9.469000e-01 1.005600e+00 7.537000e-01 9.506000e-01 4.475000e-01 5.341000e-01
+ 1.982000e-01 7.611000e-01 4.901000e-01 7.844000e-01 6.065000e-01 7.851000e-01 1.734700e+00 -4.100000e-03 9.350000e-02 1.890800e+00 -3.790000e-02 4.530000e-02 2.528000e-01 5.018000e-01
+ 1.546000e-01 4.171000e-01 2.119000e-01 6.868000e-01 2.017000e-01 8.355000e-01 3.120000e-01 5.704000e-01 2.865000e-01 7.473000e-01 2.101000e-01 6.932000e-01 1.401000e-01 3.778000e-01
+ 5.030000e-02 4.674000e-01 7.340000e-02 7.643000e-01 2.045000e-01 7.558000e-01 -4.600000e-03 1.736500e+00 -6.460000e-02 7.550000e-02 -1.480000e-02 1.930000e-02 5.150000e-02 4.315000e-01
+ 1.041000e-01 6.762000e-01 3.926000e-01 7.050000e-01 5.157000e-01 7.068000e-01 1.774500e+00 -5.130000e-02 3.810000e-02 1.958000e+00 -2.210000e-02 2.530000e-02 2.124000e-01 4.345000e-01
+ 6.700000e-03 5.981000e-01 -6.650000e-02 1.033500e+00 -2.740000e-02 1.112100e+00 3.610000e-02 9.109000e-01 1.080000e-02 1.085600e+00 -6.430000e-02 1.026400e+00 6.900000e-03 5.412000e-01
+ 2.601000e-01 8.426000e-01 1.748000e-01 5.536000e-01 1.488000e-01 4.436000e-01 2.177000e-01 4.987000e-01 1.135000e-01 4.778000e-01 1.067000e-01 6.321000e-01 1.217000e-01 3.789000e-01
+ 7.495000e-01 3.638000e-01 3.087000e-01 7.132000e-01 2.289000e-01 5.704000e-01 4.013000e-01 5.987000e-01 4.147000e-01 3.338000e-01 3.795000e-01 6.300000e-01 2.800000e-01 3.398000e-01
+ 2.536000e-01 3.032000e-01 3.890000e-01 4.836000e-01 4.285000e-01 5.635000e-01 4.099000e-01 4.558000e-01 5.144000e-01 4.786000e-01 3.665000e-01 5.107000e-01 2.354000e-01 2.655000e-01
+ 1.108700e+00 -1.400000e-03 6.231000e-01 1.103400e+00 5.175000e-01 5.217000e-01 5.151000e-01 1.232300e+00 5.308000e-01 4.732000e-01 4.814000e-01 1.269600e+00 4.136000e-01 3.438000e-01
+ 1.243000e-01 8.552000e-01 2.607000e-01 1.254300e+00 2.224000e-01 1.517600e+00 2.060000e-01 1.322800e+00 2.795000e-01 1.476000e+00 3.078000e-01 1.201100e+00 1.504000e-01 7.250000e-01
+ 9.213000e-01 1.830000e-01 1.271100e+00 2.182000e-01 1.436000e+00 1.685000e-01 1.739100e+00 -6.500000e-03 2.118200e+00 -1.382000e-01 2.510000e-02 1.703800e+00 7.304000e-01 1.416000e-01
+ 4.906000e-01 2.284000e-01 7.607000e-01 3.695000e-01 7.548000e-01 5.566000e-01 8.102000e-01 3.068000e-01 8.265000e-01 4.896000e-01 7.111000e-01 4.283000e-01 4.367000e-01 2.166000e-01
+ 5.023000e-01 2.296000e-01 8.095000e-01 3.391000e-01 8.645000e-01 4.546000e-01 7.044000e-01 4.620000e-01 8.431000e-01 5.025000e-01 7.605000e-01 3.984000e-01 4.166000e-01 2.559000e-01
+ 1.156500e+00 -5.760000e-02 6.427000e-01 1.085200e+00 4.490000e-01 9.378000e-01 5.651000e-01 1.172700e+00 4.125000e-01 9.192000e-01 6.713000e-01 1.048000e+00 4.359000e-01 4.478000e-01
+ 3.471000e-01 4.969000e-01 5.672000e-01 6.026000e-01 6.693000e-01 6.248000e-01 1.642200e+00 1.036000e-01 2.180000e-02 1.976600e+00 -6.900000e-03 6.900000e-03 3.297000e-01 3.596000e-01
+ 1.772000e-01 7.112000e-01 3.103000e-01 1.079600e+00 2.551000e-01 1.339400e+00 2.338000e-01 1.168600e+00 3.075000e-01 1.305000e+00 2.437000e-01 1.153100e+00 1.468000e-01 6.600000e-01
+ 4.819000e-01 5.060000e-01 7.012000e-01 8.559000e-01 8.285000e-01 9.376000e-01 6.944000e-01 8.615000e-01 7.004000e-01 1.112700e+00 7.276000e-01 8.193000e-01 4.284000e-01 4.650000e-01
+ 4.597000e-01 2.749000e-01 7.482000e-01 3.990000e-01 9.198000e-01 3.746000e-01 7.880000e-01 3.529000e-01 8.253000e-01 5.067000e-01 7.385000e-01 4.140000e-01 4.344000e-01 2.271000e-01
+ 4.523000e-01 6.470000e-01 2.413000e-01 1.312200e+00 1.894000e-01 8.252000e-01 1.803000e-01 1.379300e+00 1.961000e-01 7.919000e-01 1.998000e-01 1.361800e+00 1.884000e-01 5.538000e-01
+ 4.270000e-02 6.164000e-01 1.516000e-01 8.657000e-01 8.410000e-02 1.095600e+00 1.261000e-01 8.992000e-01 1.549000e-01 1.029100e+00 7.630000e-02 9.625000e-01 3.020000e-02 5.699000e-01
+ 4.810000e-02 6.301000e-01 3.641000e-01 6.274000e-01 4.213000e-01 6.975000e-01 4.650000e-02 1.677500e+00 -2.970000e-02 2.037900e+00 1.500000e-03 -2.200000e-03 1.792000e-01 4.041000e-01
+-5.410000e-02 8.424000e-01 2.830000e-01 8.134000e-01 4.024000e-01 8.135000e-01 6.600000e-03 1.724300e+00 -8.130000e-02 2.099400e+00 -3.590000e-02 4.370000e-02 1.669000e-01 4.733000e-01
+ 1.492000e-01 9.433000e-01 4.581000e-01 1.263300e+00 4.340000e-01 1.517700e+00 1.692600e+00 4.630000e-02 -5.270000e-02 2.056400e+00 -1.043000e-01 1.855000e+00 2.145000e-01 7.789000e-01
+ 5.950000e-01 5.037000e-01 8.418000e-01 6.593000e-01 1.035600e+00 5.684000e-01 1.736200e+00 -4.000000e-03 1.998700e+00 2.900000e-03 7.450000e-02 1.641700e+00 5.081000e-01 3.627000e-01
+ 1.041200e+00 7.690000e-02 3.429000e-01 5.131000e-01 4.083000e-01 2.563000e-01 4.457000e-01 3.872000e-01 3.686000e-01 2.875000e-01 4.201000e-01 4.196000e-01 3.165000e-01 2.346000e-01
+ 5.750000e-01 9.830000e-02 7.359000e-01 3.524000e-01 9.226000e-01 3.027000e-01 7.922000e-01 2.849000e-01 9.758000e-01 2.588000e-01 8.202000e-01 2.514000e-01 4.557000e-01 1.661000e-01
+ 2.356000e-01 2.608000e-01 3.491000e-01 4.312000e-01 5.015000e-01 3.656000e-01 3.259000e-01 4.589000e-01 3.623000e-01 5.480000e-01 3.415000e-01 4.391000e-01 2.121000e-01 2.362000e-01
+-4.400000e-03 5.750000e-02 2.999000e-01 5.590000e-02 3.436000e-01 1.495000e-01 6.220000e-02 -7.260000e-02 3.770000e-02 -4.620000e-02 -1.930000e-02 2.460000e-02 1.388000e-01 5.610000e-02
+ 3.853000e-01 2.266000e-01 6.398000e-01 2.940000e-01 7.465000e-01 3.086000e-01 1.889800e+00 -1.804000e-01 2.080000e-02 1.977300e+00 -1.660000e-02 1.840000e-02 3.421000e-01 2.053000e-01
+ 5.732000e-01 9.820000e-02 9.495000e-01 2.150000e-02 1.010600e+00 9.040000e-02 1.689700e+00 5.020000e-02 1.855100e+00 1.663000e-01 -1.440000e-02 1.560000e-02 5.282000e-01 4.370000e-02
+ 5.975000e-01 9.590000e-02 8.709000e-01 2.277000e-01 9.448000e-01 3.096000e-01 8.984000e-01 1.909000e-01 1.040400e+00 2.144000e-01 8.695000e-01 2.280000e-01 4.989000e-01 1.341000e-01
+ 1.161700e+00 -6.800000e-02 5.103000e-01 1.213400e+00 4.322000e-01 6.767000e-01 5.040000e-01 1.219600e+00 3.956000e-01 6.859000e-01 5.204000e-01 1.201400e+00 3.472000e-01 4.431000e-01
+ 1.021700e+00 9.880000e-02 4.318000e-01 4.948000e-01 3.271000e-01 4.096000e-01 4.788000e-01 4.344000e-01 3.361000e-01 3.875000e-01 4.107000e-01 5.175000e-01 3.394000e-01 2.462000e-01
+ 9.910000e-02 6.097000e-01 1.928000e-01 9.126000e-01 2.849000e-01 9.650000e-01 1.819000e-01 9.313000e-01 2.587000e-01 1.014800e+00 2.217000e-01 8.827000e-01 1.360000e-01 4.989000e-01
+ 4.196000e-01 4.070000e-01 8.509000e-01 2.705000e-01 8.090000e-01 4.584000e-01 1.633500e+00 1.120000e-01 -6.650000e-02 2.080700e+00 1.660000e-02 -1.950000e-02 4.122000e-01 2.621000e-01
+ 6.442000e-01 4.657000e-01 2.805000e-01 9.211000e-01 3.062000e-01 5.754000e-01 3.203000e-01 8.736000e-01 3.428000e-01 5.107000e-01 2.195000e-01 9.937000e-01 2.591000e-01 4.182000e-01
+ 1.083000e-01 3.704000e-01 2.198000e-01 5.239000e-01 1.973000e-01 6.579000e-01 1.038000e-01 6.641000e-01 2.724000e-01 5.815000e-01 2.600000e-01 4.734000e-01 9.710000e-02 3.372000e-01
+ 1.143400e+00 -4.700000e-02 6.394000e-01 1.084000e+00 5.072000e-01 8.219000e-01 7.274000e-01 9.770000e-01 4.117000e-01 8.792000e-01 6.133000e-01 1.115600e+00 4.261000e-01 4.439000e-01
+ 4.894000e-01 2.322000e-01 9.444000e-01 1.599000e-01 1.136500e+00 1.100000e-01 9.970000e-01 9.400000e-02 1.083900e+00 1.925000e-01 9.986000e-01 9.700000e-02 5.573000e-01 7.900000e-02
+ 6.940000e-02 4.496000e-01 3.861000e-01 4.431000e-01 5.083000e-01 4.420000e-01 1.722500e+00 8.000000e-03 -2.180000e-02 2.580000e-02 -1.260000e-02 1.530000e-02 2.012000e-01 2.778000e-01
+ 4.709000e-01 1.637000e-01 7.540000e-01 2.368000e-01 9.168000e-01 1.987000e-01 7.055000e-01 2.919000e-01 8.799000e-01 2.597000e-01 6.879000e-01 3.119000e-01 4.364000e-01 1.345000e-01
+ 3.695000e-01 7.475000e-01 7.178000e-01 7.092000e-01 8.749000e-01 6.676000e-01 1.720000e+00 6.400000e-03 -9.800000e-03 2.015000e+00 5.230000e-02 1.671600e+00 4.293000e-01 4.062000e-01
+ 2.833000e-01 6.929000e-01 3.343000e-01 1.207300e+00 4.848000e-01 1.259600e+00 4.201000e-01 1.108400e+00 4.528000e-01 1.313000e+00 3.483000e-01 1.200200e+00 2.231000e-01 6.640000e-01
+ 3.249000e-01 2.663000e-01 2.135000e-01 8.380000e-02 2.176000e-01 3.230000e-02 1.633000e-01 1.440000e-01 1.643000e-01 8.870000e-02 2.186000e-01 7.850000e-02 1.557000e-01 8.340000e-02
+ 2.509000e-01 4.415000e-01 3.253000e-01 7.774000e-01 3.237000e-01 9.379000e-01 2.093000e-01 9.146000e-01 3.470000e-01 9.313000e-01 2.318000e-01 8.854000e-01 1.571000e-01 4.836000e-01
+ 4.089000e-01 3.129000e-01 5.581000e-01 5.068000e-01 8.138000e-01 3.462000e-01 1.677900e+00 6.840000e-02 2.080000e-02 1.974000e+00 -2.290000e-02 2.710000e-02 3.926000e-01 2.173000e-01
+ 6.643000e-01 4.440000e-01 4.709000e-01 3.021000e-01 3.044000e-01 3.373000e-01 3.318000e-01 4.683000e-01 2.892000e-01 3.421000e-01 3.202000e-01 4.828000e-01 2.603000e-01 2.726000e-01
+ 1.089800e+00 1.820000e-02 4.522000e-01 1.280700e+00 2.793000e-01 9.139000e-01 4.674000e-01 1.263400e+00 4.811000e-01 6.379000e-01 5.040000e-01 1.218100e+00 3.527000e-01 4.602000e-01
+ 7.746000e-01 2.773000e-01 1.262500e+00 3.768000e-01 1.368200e+00 5.105000e-01 1.176300e+00 4.776000e-01 1.362200e+00 5.502000e-01 1.244100e+00 4.004000e-01 7.037000e-01 2.478000e-01
+ 1.214400e+00 -1.345000e-01 5.728000e-01 8.045000e-01 4.732000e-01 4.932000e-01 5.432000e-01 8.459000e-01 4.365000e-01 5.079000e-01 6.445000e-01 7.233000e-01 4.171000e-01 3.019000e-01
+ 1.020300e+00 1.023000e-01 1.339900e+00 3.863000e-01 8.918000e-01 4.010000e-01 1.400000e+00 3.113000e-01 9.264000e-01 3.066000e-01 1.256800e+00 4.880000e-01 7.044000e-01 1.491000e-01
+-8.000000e-04 8.211000e-01 2.361000e-01 9.075000e-01 3.880000e-01 8.679000e-01 -2.060000e-02 1.757600e+00 -1.680000e-02 2.020600e+00 -5.400000e-02 6.580000e-02 1.555000e-01 5.119000e-01
+ 8.590000e-02 5.093000e-01 1.094000e-01 8.284000e-01 1.888000e-01 8.672000e-01 2.009000e-01 7.242000e-01 3.171000e-01 7.291000e-01 2.109000e-01 7.078000e-01 1.055000e-01 4.283000e-01
+ 5.585000e-01 4.104000e-01 9.091000e-01 6.040000e-01 9.648000e-01 7.640000e-01 8.639000e-01 6.560000e-01 9.755000e-01 7.808000e-01 9.222000e-01 5.776000e-01 4.978000e-01 3.786000e-01
+ 6.952000e-01 9.790000e-02 1.119200e+00 1.273000e-01 1.299400e+00 1.131000e-01 1.206300e+00 2.280000e-02 1.367300e+00 5.920000e-02 1.061200e+00 1.961000e-01 6.713000e-01 4.250000e-02
+-2.900000e-02 6.100000e-01 1.768000e-01 7.117000e-01 2.572000e-01 7.548000e-01 -4.300000e-02 1.783800e+00 -1.134000e-01 2.131000e+00 2.780000e-02 -3.580000e-02 9.120000e-02 4.239000e-01
+ 6.690000e-02 1.033000e+00 5.470000e-02 4.185000e-01 6.780000e-02 3.254000e-01 1.660000e-02 4.632000e-01 3.370000e-02 3.636000e-01 6.200000e-03 4.806000e-01 1.390000e-02 3.445000e-01
+ 4.392000e-01 1.730000e-01 7.467000e-01 1.972000e-01 7.673000e-01 3.279000e-01 8.360000e-01 9.120000e-02 8.220000e-01 2.830000e-01 7.616000e-01 1.845000e-01 4.333000e-01 1.134000e-01
+ 1.127900e+00 -2.750000e-02 1.157800e+00 5.609000e-01 8.199000e-01 7.000000e-01 1.071100e+00 6.639000e-01 8.206000e-01 6.127000e-01 1.003100e+00 7.467000e-01 6.514000e-01 2.694000e-01
+ 8.172000e-01 2.907000e-01 3.781000e-01 1.356400e+00 3.831000e-01 1.575800e+00 5.063000e-01 1.203100e+00 3.232000e-01 1.481600e+00 5.606000e-01 1.139900e+00 2.864000e-01 7.047000e-01
+ 3.522000e-01 3.816000e-01 6.513000e-01 4.030000e-01 8.796000e-01 2.741000e-01 1.853000e+00 -1.437000e-01 2.020000e-02 1.981500e+00 4.100000e-02 -4.790000e-02 4.054000e-01 2.034000e-01
+ 6.243000e-01 3.136000e-01 9.404000e-01 5.338000e-01 1.054800e+00 6.177000e-01 1.003000e+00 4.583000e-01 1.125700e+00 5.655000e-01 8.917000e-01 5.895000e-01 5.389000e-01 3.107000e-01
+ 1.032000e-01 2.145000e-01 2.228000e-01 2.618000e-01 2.334000e-01 3.197000e-01 2.795000e-01 1.928000e-01 2.514000e-01 3.102000e-01 2.510000e-01 2.285000e-01 1.353000e-01 1.438000e-01
+ 9.120000e-02 4.763000e-01 8.260000e-02 8.195000e-01 1.323000e-01 8.828000e-01 1.208000e-01 7.711000e-01 1.502000e-01 8.749000e-01 8.230000e-02 8.164000e-01 4.590000e-02 4.733000e-01
+ 8.400000e-03 5.233000e-01 3.114000e-01 5.303000e-01 3.548000e-01 6.212000e-01 -7.810000e-02 1.824600e+00 -1.500000e-02 1.630000e-02 1.900000e-02 -2.190000e-02 1.548000e-01 3.324000e-01
+ 3.621000e-01 3.948000e-01 5.814000e-01 5.091000e-01 7.371000e-01 4.622000e-01 1.827200e+00 -1.137000e-01 2.880000e-02 1.964000e+00 -9.400000e-03 1.210000e-02 3.026000e-01 3.417000e-01
+ 3.580000e-01 2.763000e-01 4.854000e-01 5.267000e-01 6.305000e-01 5.067000e-01 6.041000e-01 3.864000e-01 6.265000e-01 5.287000e-01 5.911000e-01 4.011000e-01 3.291000e-01 2.458000e-01
+ 3.187000e-01 7.551000e-01 4.209000e-01 1.268700e+00 6.258000e-01 1.271600e+00 6.451000e-01 1.001000e+00 5.475000e-01 1.395100e+00 5.579000e-01 1.113300e+00 3.066000e-01 6.586000e-01
+ 1.185000e-01 7.796000e-01 2.708000e-01 1.118500e+00 3.414000e-01 1.232300e+00 2.990000e-01 1.083900e+00 4.309000e-01 1.150900e+00 3.358000e-01 1.039400e+00 1.932000e-01 6.014000e-01
+ 1.116200e+00 -1.300000e-02 5.206000e-01 2.610000e-02 4.651000e-01 -1.630000e-02 5.007000e-01 4.660000e-02 4.346000e-01 1.190000e-02 4.827000e-01 6.850000e-02 3.726000e-01 2.650000e-02
+ 8.306000e-01 9.530000e-02 1.104800e+00 3.818000e-01 1.345200e+00 3.360000e-01 1.102500e+00 3.846000e-01 1.274300e+00 4.425000e-01 1.127500e+00 3.547000e-01 6.521000e-01 2.045000e-01
+ 1.156300e+00 -6.370000e-02 9.582000e-01 7.839000e-01 8.017000e-01 5.624000e-01 1.087200e+00 6.377000e-01 7.981000e-01 5.080000e-01 9.966000e-01 7.431000e-01 6.323000e-01 2.479000e-01
+ 1.113700e+00 -1.060000e-02 5.751000e-01 8.128000e-01 5.425000e-01 4.148000e-01 5.360000e-01 8.606000e-01 6.221000e-01 2.954000e-01 6.010000e-01 7.833000e-01 4.364000e-01 2.829000e-01
+-7.000000e-04 1.000200e+00 6.240000e-02 1.491200e+00 3.840000e-02 1.734700e+00 1.108000e-01 1.435400e+00 1.197000e-01 1.670000e+00 -2.700000e-03 1.567500e+00 3.070000e-02 8.685000e-01
+ 1.240000e-01 9.872000e-01 1.695000e-01 5.126000e-01 1.271000e-01 4.343000e-01 1.380000e-02 6.953000e-01 2.270000e-02 5.462000e-01 1.374000e-01 5.482000e-01 7.740000e-02 4.041000e-01
+ 4.141000e-01 6.719000e-01 2.314000e-01 1.497700e+00 2.280000e-01 9.137000e-01 2.178000e-01 1.504500e+00 1.092000e-01 1.019000e+00 1.247000e-01 1.614500e+00 1.445000e-01 6.616000e-01
+ 6.177000e-01 1.427000e-01 9.623000e-01 1.017000e-01 1.103000e+00 7.800000e-02 1.600300e+00 1.562000e-01 1.871500e+00 1.531000e-01 2.210000e-02 -2.600000e-02 5.527000e-01 7.560000e-02
+ 1.059300e+00 5.470000e-02 9.245000e-01 3.887000e-01 7.576000e-01 1.769000e-01 9.578000e-01 3.412000e-01 7.952000e-01 1.095000e-01 1.041500e+00 2.450000e-01 6.124000e-01 9.040000e-02
+ 9.449000e-01 1.353000e-01 3.213000e-01 1.421500e+00 3.429000e-01 7.263000e-01 3.767000e-01 1.351400e+00 3.432000e-01 6.875000e-01 3.776000e-01 1.354700e+00 2.832000e-01 4.873000e-01
+ 6.301000e-01 4.748000e-01 4.008000e-01 9.958000e-01 2.741000e-01 6.979000e-01 3.320000e-01 1.073400e+00 3.438000e-01 5.908000e-01 3.196000e-01 1.086800e+00 2.474000e-01 4.766000e-01
+ 1.978000e-01 9.140000e-01 1.938000e-01 4.248000e-01 6.020000e-02 4.722000e-01 9.960000e-02 5.354000e-01 7.210000e-02 4.447000e-01 1.717000e-01 4.529000e-01 1.206000e-01 3.249000e-01
+ 2.309000e-01 2.064000e-01 6.680000e-01 5.680000e-02 6.932000e-01 1.725000e-01 1.859700e+00 -1.525000e-01 -3.130000e-02 3.630000e-02 5.910000e-02 -7.020000e-02 3.368000e-01 8.280000e-02
+ 2.211000e-01 7.943000e-01 6.555000e-01 6.537000e-01 6.771000e-01 7.673000e-01 1.808700e+00 -8.840000e-02 7.200000e-03 1.994200e+00 -1.680000e-02 1.970000e-02 3.111000e-01 4.701000e-01
+ 2.125000e-01 6.560000e-01 4.932000e-01 6.953000e-01 6.084000e-01 6.982000e-01 1.701900e+00 3.800000e-02 -5.100000e-02 2.060000e+00 -4.300000e-03 2.300000e-03 2.587000e-01 4.444000e-01
+ 5.390000e-02 1.055300e+00 1.167000e-01 5.124000e-01 -7.110000e-02 6.230000e-01 1.115000e-01 5.170000e-01 3.800000e-02 4.834000e-01 6.440000e-02 5.757000e-01 3.960000e-02 4.136000e-01
+ 2.916000e-01 2.618000e-01 3.233000e-01 5.671000e-01 5.434000e-01 4.386000e-01 4.561000e-01 4.079000e-01 4.168000e-01 6.065000e-01 3.949000e-01 4.845000e-01 2.435000e-01 2.603000e-01
+ 3.046000e-01 1.198000e-01 5.786000e-01 1.665000e-01 7.735000e-01 8.260000e-02 1.760100e+00 -3.140000e-02 2.880000e-02 -3.630000e-02 -2.370000e-02 2.720000e-02 3.292000e-01 9.370000e-02
+ 1.812000e-01 5.192000e-01 4.632000e-01 5.597000e-01 6.344000e-01 4.976000e-01 1.709800e+00 2.520000e-02 -7.980000e-02 2.089700e+00 -4.440000e-02 5.320000e-02 2.372000e-01 3.645000e-01
+ 3.179000e-01 4.891000e-01 7.155000e-01 3.883000e-01 7.770000e-01 4.547000e-01 1.566300e+00 1.949000e-01 5.830000e-02 1.927600e+00 3.480000e-02 -4.360000e-02 3.791000e-01 2.744000e-01
+ 1.477000e-01 4.547000e-01 2.569000e-01 6.831000e-01 2.450000e-01 8.360000e-01 3.765000e-01 5.412000e-01 2.740000e-01 8.186000e-01 1.857000e-01 7.708000e-01 1.411000e-01 4.037000e-01
+ 2.131000e-01 6.178000e-01 6.259000e-01 4.974000e-01 7.543000e-01 4.947000e-01 1.708600e+00 2.940000e-02 4.010000e-02 1.945600e+00 2.130000e-02 -2.470000e-02 3.206000e-01 3.490000e-01
+ 1.050800e+00 6.520000e-02 4.232000e-01 8.030000e-02 3.003000e-01 1.362000e-01 3.865000e-01 1.220000e-01 3.645000e-01 4.690000e-02 4.060000e-01 9.850000e-02 3.132000e-01 6.030000e-02
+ 2.090000e-02 7.060000e-01 2.470000e-01 8.011000e-01 3.740000e-01 7.966000e-01 -3.690000e-02 1.777300e+00 4.610000e-02 1.946200e+00 -2.460000e-02 2.910000e-02 1.503000e-01 4.630000e-01
+ 4.137000e-01 2.892000e-01 4.493000e-01 6.976000e-01 7.307000e-01 5.271000e-01 5.331000e-01 5.940000e-01 5.688000e-01 7.422000e-01 5.595000e-01 5.605000e-01 3.348000e-01 3.115000e-01
+ 5.298000e-01 6.420000e-02 6.824000e-01 2.788000e-01 7.870000e-01 3.054000e-01 6.489000e-01 3.206000e-01 8.537000e-01 2.453000e-01 7.167000e-01 2.387000e-01 4.121000e-01 1.402000e-01
+ 4.221000e-01 3.664000e-01 7.573000e-01 4.597000e-01 8.641000e-01 5.222000e-01 7.522000e-01 4.671000e-01 8.513000e-01 5.595000e-01 7.478000e-01 4.688000e-01 4.424000e-01 2.608000e-01
+ 4.622000e-01 1.290000e-01 7.078000e-01 2.222000e-01 7.751000e-01 2.817000e-01 7.012000e-01 2.305000e-01 8.362000e-01 2.308000e-01 6.095000e-01 3.373000e-01 3.959000e-01 1.415000e-01
+ 6.361000e-01 8.160000e-02 9.489000e-01 1.881000e-01 1.033400e+00 2.630000e-01 9.601000e-01 1.715000e-01 1.003700e+00 3.191000e-01 1.146200e+00 -4.830000e-02 5.663000e-01 8.560000e-02
+ 2.930000e-01 5.022000e-01 7.152000e-01 3.739000e-01 7.363000e-01 4.900000e-01 1.759500e+00 -3.130000e-02 -6.460000e-02 2.074000e+00 4.020000e-02 -4.750000e-02 3.524000e-01 2.979000e-01
+ 4.311000e-01 1.810000e-01 8.205000e-01 9.240000e-02 1.006300e+00 1.160000e-02 1.695800e+00 3.640000e-02 -8.400000e-03 2.010900e+00 -5.130000e-02 6.280000e-02 4.713000e-01 5.760000e-02
+ 3.465000e-01 1.600000e-01 6.633000e-01 1.569000e-01 7.640000e-01 1.793000e-01 1.622700e+00 1.281000e-01 1.860000e-02 -2.340000e-02 -2.300000e-03 2.000000e-03 3.697000e-01 1.015000e-01
+ 9.872000e-01 -1.126000e-01 1.332000e+00 7.640000e-02 1.548500e+00 5.010000e-02 1.269100e+00 1.510000e-01 1.585700e+00 2.980000e-02 1.368100e+00 3.380000e-02 7.755000e-01 3.590000e-02
+ 2.610000e-01 7.540000e-01 6.275000e-01 6.880000e-01 6.739000e-01 7.751000e-01 1.638600e+00 1.087000e-01 5.130000e-02 1.938700e+00 4.940000e-02 -5.850000e-02 3.284000e-01 4.529000e-01
+ 2.109000e-01 6.045000e-01 2.995000e-01 9.821000e-01 5.221000e-01 9.051000e-01 3.581000e-01 9.091000e-01 4.162000e-01 1.046800e+00 3.561000e-01 9.154000e-01 1.993000e-01 5.354000e-01
+ 5.272000e-01 1.323000e-01 1.047100e+00 -1.195000e-01 1.095700e+00 -3.110000e-02 1.817900e+00 -1.026000e-01 1.931500e+00 7.750000e-02 6.700000e-03 -8.200000e-03 5.379000e-01 1.850000e-02
+ 2.795000e-01 5.918000e-01 4.382000e-01 9.261000e-01 5.119000e-01 1.035300e+00 4.693000e-01 8.914000e-01 5.577000e-01 1.006300e+00 3.827000e-01 9.897000e-01 2.396000e-01 5.500000e-01
+ 7.918000e-01 -6.760000e-02 9.720000e-01 9.020000e-02 1.069100e+00 1.199000e-01 1.781500e+00 -5.860000e-02 1.962800e+00 4.390000e-02 2.400000e-02 -2.710000e-02 5.785000e-01 4.470000e-02
+ 1.210900e+00 -1.261000e-01 5.953000e-01 1.149400e+00 5.685000e-01 9.512000e-01 6.746000e-01 1.050000e+00 5.015000e-01 9.448000e-01 6.418000e-01 1.093700e+00 4.564000e-01 4.655000e-01
+ 4.166000e-01 -2.610000e-02 7.158000e-01 -8.600000e-03 8.091000e-01 2.500000e-02 1.774000e+00 -4.670000e-02 -6.200000e-03 6.800000e-03 -6.670000e-02 7.770000e-02 3.702000e-01 3.560000e-02
+ 1.066400e+00 4.440000e-02 3.890000e-01 1.361000e+00 4.381000e-01 6.264000e-01 5.386000e-01 1.188200e+00 4.047000e-01 6.327000e-01 3.706000e-01 1.379200e+00 3.626000e-01 4.052000e-01
+ 3.560000e-01 5.282000e-01 6.139000e-01 5.976000e-01 7.348000e-01 5.920000e-01 1.600500e+00 1.565000e-01 -3.680000e-02 2.045900e+00 2.100000e-03 -9.000000e-04 3.704000e-01 3.378000e-01
+ 2.368000e-01 4.550000e-01 4.686000e-01 5.905000e-01 5.762000e-01 6.224000e-01 4.614000e-01 5.998000e-01 5.424000e-01 6.833000e-01 5.509000e-01 4.959000e-01 2.914000e-01 3.168000e-01
+ 1.412000e-01 5.766000e-01 2.682000e-01 7.994000e-01 4.284000e-01 7.535000e-01 2.010000e-02 1.708900e+00 -5.190000e-02 2.063600e+00 -2.990000e-02 3.540000e-02 1.924000e-01 4.226000e-01
+ 4.028000e-01 3.229000e-01 5.538000e-01 5.916000e-01 6.593000e-01 6.407000e-01 5.754000e-01 5.726000e-01 6.679000e-01 6.512000e-01 4.964000e-01 6.656000e-01 3.227000e-01 3.403000e-01
+ 1.014100e+00 1.139000e-01 6.173000e-01 1.123200e+00 5.625000e-01 5.521000e-01 7.679000e-01 9.384000e-01 5.604000e-01 5.170000e-01 5.513000e-01 1.193400e+00 4.537000e-01 3.371000e-01
+ 2.231000e-01 2.160000e-01 3.652000e-01 3.177000e-01 3.538000e-01 4.355000e-01 3.714000e-01 3.082000e-01 3.945000e-01 4.011000e-01 3.866000e-01 2.949000e-01 2.034000e-01 1.923000e-01
+ 3.449000e-01 7.698000e-01 2.342000e-01 1.963000e-01 1.374000e-01 2.406000e-01 2.404000e-01 1.906000e-01 1.173000e-01 2.574000e-01 2.592000e-01 1.675000e-01 1.858000e-01 1.419000e-01
+ 3.156000e-01 1.832000e-01 3.843000e-01 4.186000e-01 5.150000e-01 3.846000e-01 4.053000e-01 3.939000e-01 4.708000e-01 4.471000e-01 4.462000e-01 3.420000e-01 2.460000e-01 2.126000e-01
+ 6.092000e-01 1.414000e-01 8.663000e-01 3.277000e-01 8.921000e-01 4.823000e-01 8.205000e-01 3.809000e-01 1.009700e+00 3.674000e-01 9.123000e-01 2.730000e-01 5.124000e-01 1.745000e-01
+ 8.463000e-01 1.475000e-01 1.383300e+00 1.659000e-01 1.409400e+00 3.871000e-01 1.236000e+00 3.424000e-01 1.439400e+00 3.775000e-01 1.240400e+00 3.315000e-01 7.584000e-01 1.434000e-01
+ 1.186000e-01 5.166000e-01 2.754000e-01 7.017000e-01 3.716000e-01 7.266000e-01 1.927000e-01 8.017000e-01 2.624000e-01 8.739000e-01 3.585000e-01 6.027000e-01 1.595000e-01 4.034000e-01
+-7.150000e-02 1.194100e+00 1.877000e-01 1.441100e+00 3.201000e-01 1.431200e+00 4.790000e-02 1.673200e+00 -5.740000e-02 2.074100e+00 1.340000e-02 1.714800e+00 9.210000e-02 8.434000e-01
+ 1.061600e+00 5.140000e-02 8.015000e-01 9.388000e-01 6.061000e-01 6.110000e-01 9.143000e-01 7.975000e-01 6.790000e-01 4.857000e-01 7.059000e-01 1.043000e+00 5.455000e-01 2.809000e-01
+ 7.813000e-01 2.707000e-01 1.166100e+00 1.809000e-01 1.340200e+00 1.161000e-01 1.784300e+00 -6.440000e-02 2.014600e+00 -1.000000e-02 4.940000e-02 -5.860000e-02 6.698000e-01 1.289000e-01
+-2.000000e-02 8.819000e-01 2.728000e-01 8.867000e-01 2.715000e-01 1.033800e+00 1.349000e-01 1.575400e+00 2.450000e-02 1.974500e+00 2.900000e-02 -3.400000e-02 1.103000e-01 5.812000e-01
+ 1.124500e+00 -2.180000e-02 4.186000e-01 1.652000e-01 3.845000e-01 9.650000e-02 4.261000e-01 1.539000e-01 3.543000e-01 1.256000e-01 4.996000e-01 6.970000e-02 3.512000e-01 6.390000e-02
+-6.500000e-03 7.993000e-01 3.267000e-01 7.767000e-01 4.522000e-01 7.680000e-01 -2.820000e-02 1.766800e+00 4.970000e-02 1.942100e+00 -5.330000e-02 6.490000e-02 1.645000e-01 4.867000e-01
+ 6.900000e-02 7.249000e-01 9.240000e-02 1.152900e+00 4.710000e-02 1.384400e+00 6.110000e-02 1.191900e+00 4.000000e-02 1.400800e+00 1.907000e-01 1.034100e+00 6.170000e-02 6.550000e-01
+ 1.428000e-01 5.786000e-01 4.101000e-01 6.276000e-01 4.227000e-01 7.570000e-01 1.796700e+00 -7.820000e-02 1.590000e-02 1.975800e+00 3.080000e-02 -3.800000e-02 2.194000e-01 3.917000e-01
+ 4.230000e-01 6.180000e-01 6.667000e-01 9.600000e-01 8.423000e-01 9.991000e-01 6.821000e-01 9.383000e-01 7.610000e-01 1.118300e+00 6.837000e-01 9.371000e-01 3.987000e-01 5.377000e-01
+ 1.123200e+00 -1.980000e-02 4.842000e-01 1.256400e+00 4.733000e-01 5.613000e-01 6.713000e-01 1.041900e+00 4.869000e-01 5.181000e-01 6.174000e-01 1.101800e+00 3.947000e-01 3.597000e-01
+ 3.235000e-01 4.008000e-01 6.078000e-01 4.450000e-01 7.643000e-01 3.984000e-01 1.743500e+00 -1.240000e-02 7.710000e-02 1.904800e+00 -4.670000e-02 5.660000e-02 3.336000e-01 2.813000e-01
+ 8.635000e-01 2.416000e-01 3.268000e-01 7.437000e-01 3.194000e-01 4.958000e-01 4.484000e-01 5.974000e-01 3.312000e-01 4.618000e-01 3.342000e-01 7.332000e-01 2.896000e-01 3.473000e-01
+ 3.989000e-01 3.064000e-01 7.602000e-01 3.139000e-01 7.549000e-01 4.913000e-01 6.895000e-01 3.998000e-01 6.527000e-01 6.348000e-01 6.298000e-01 4.730000e-01 3.961000e-01 2.339000e-01
+-5.620000e-02 6.196000e-01 6.960000e-02 8.052000e-01 1.929000e-01 8.025000e-01 2.680000e-02 1.702400e+00 5.080000e-02 1.938200e+00 3.790000e-02 -4.510000e-02 7.300000e-02 4.286000e-01
+ 7.670000e-02 5.532000e-01 3.697000e-01 5.783000e-01 5.229000e-01 5.426000e-01 1.730300e+00 2.500000e-03 -5.700000e-03 2.006900e+00 4.780000e-02 -5.690000e-02 2.083000e-01 3.421000e-01
+ 1.222200e+00 -1.385000e-01 5.873000e-01 1.141900e+00 4.907000e-01 8.950000e-01 4.724000e-01 1.276000e+00 4.236000e-01 9.068000e-01 5.406000e-01 1.195200e+00 4.016000e-01 4.845000e-01
+ 2.563000e-01 6.222000e-01 3.700000e-01 1.011000e+00 4.776000e-01 1.086300e+00 4.206000e-01 9.524000e-01 4.446000e-01 1.143800e+00 3.690000e-01 1.014000e+00 2.464000e-01 5.458000e-01
+ 1.840000e-01 6.729000e-01 4.951000e-01 6.792000e-01 5.736000e-01 7.248000e-01 1.651000e+00 9.460000e-02 -1.130000e-02 2.015000e+00 1.500000e-03 -2.300000e-03 2.611000e-01 4.318000e-01
+ 1.102100e+00 3.900000e-03 1.025300e+00 7.218000e-01 8.517000e-01 8.106000e-01 1.152900e+00 5.722000e-01 9.200000e-01 6.121000e-01 1.224200e+00 4.924000e-01 6.570000e-01 2.914000e-01
+ 4.650000e-01 1.452000e-01 7.837000e-01 1.378000e-01 9.267000e-01 1.065000e-01 1.763500e+00 -3.920000e-02 1.258000e-01 1.849400e+00 -1.710000e-02 2.010000e-02 4.704000e-01 5.990000e-02
+ 1.125400e+00 -1.860000e-02 4.118000e-01 1.341000e+00 5.358000e-01 4.982000e-01 5.828000e-01 1.137600e+00 4.601000e-01 5.488000e-01 5.933000e-01 1.129500e+00 3.844000e-01 3.734000e-01
+ 1.075400e+00 3.380000e-02 7.599000e-01 9.807000e-01 6.185000e-01 7.503000e-01 8.009000e-01 9.346000e-01 6.739000e-01 6.285000e-01 8.539000e-01 8.723000e-01 5.453000e-01 3.332000e-01
+ 3.525000e-01 3.345000e-01 5.116000e-01 5.722000e-01 5.640000e-01 6.692000e-01 4.808000e-01 6.056000e-01 5.902000e-01 6.561000e-01 5.130000e-01 5.635000e-01 3.051000e-01 3.173000e-01
+ 5.190000e-01 5.924000e-01 8.648000e-01 7.441000e-01 1.038200e+00 6.802000e-01 1.758000e+00 -3.610000e-02 1.986400e+00 1.070000e-02 -4.070000e-02 1.777700e+00 5.055000e-01 4.177000e-01
+ 5.158000e-01 1.822000e-01 8.558000e-01 1.480000e-01 9.620000e-01 1.680000e-01 1.790400e+00 -7.240000e-02 -3.050000e-02 2.032800e+00 7.580000e-02 -9.170000e-02 4.772000e-01 1.143000e-01
+ 5.610000e-01 2.925000e-01 7.260000e-01 4.631000e-01 9.444000e-01 3.525000e-01 1.723300e+00 7.400000e-03 3.060000e-02 1.958500e+00 3.750000e-02 -4.560000e-02 4.457000e-01 2.517000e-01
+ 5.050000e-01 6.070000e-01 8.627000e-01 8.609000e-01 8.843000e-01 1.080400e+00 1.662900e+00 7.810000e-02 9.900000e-02 1.886300e+00 -5.900000e-02 1.799200e+00 4.807000e-01 5.152000e-01
+ 4.083000e-01 6.277000e-01 5.078000e-01 1.137300e+00 5.682000e-01 1.305400e+00 6.229000e-01 1.002100e+00 6.980000e-01 1.180200e+00 6.487000e-01 9.710000e-01 3.243000e-01 6.188000e-01
+ 5.610000e-01 1.063000e-01 7.032000e-01 3.773000e-01 7.404000e-01 4.993000e-01 7.263000e-01 3.493000e-01 8.452000e-01 3.924000e-01 6.268000e-01 4.698000e-01 4.199000e-01 2.013000e-01
+-4.600000e-03 4.540000e-01 1.915000e-01 5.706000e-01 3.021000e-01 5.829000e-01 -6.530000e-02 1.807800e+00 -4.130000e-02 4.920000e-02 9.000000e-03 -1.080000e-02 1.095000e-01 3.238000e-01
+ 4.558000e-01 6.417000e-01 7.385000e-01 7.060000e-01 9.107000e-01 6.386000e-01 1.640500e+00 1.095000e-01 -1.430000e-02 2.021500e+00 -6.380000e-02 1.806700e+00 4.203000e-01 4.233000e-01
+ 3.749000e-01 3.478000e-01 6.614000e-01 3.763000e-01 8.617000e-01 2.874000e-01 1.680900e+00 6.080000e-02 5.110000e-02 1.937900e+00 -4.430000e-02 5.130000e-02 3.498000e-01 2.637000e-01
+ 1.172000e+00 -7.900000e-02 5.569000e-01 2.045000e-01 5.080000e-01 1.028000e-01 5.461000e-01 2.194000e-01 4.582000e-01 1.502000e-01 6.272000e-01 1.214000e-01 4.147000e-01 9.840000e-02
+ 1.114400e+00 -1.480000e-02 6.321000e-01 8.080000e-02 5.397000e-01 4.140000e-02 5.880000e-01 1.360000e-01 4.192000e-01 1.711000e-01 5.523000e-01 1.767000e-01 4.362000e-01 5.740000e-02
+ 4.172000e-01 4.878000e-01 7.567000e-01 4.570000e-01 8.219000e-01 5.137000e-01 1.698300e+00 4.280000e-02 -2.360000e-02 2.029800e+00 -3.000000e-04 2.000000e-03 4.055000e-01 3.136000e-01
+ 3.340000e-02 9.060000e-02 1.528000e-01 2.821000e-01 2.778000e-01 2.702000e-01 3.840000e-02 1.689500e+00 8.330000e-02 -1.008000e-01 7.790000e-02 -9.420000e-02 6.010000e-02 1.798000e-01
+ 1.125000e+00 -1.910000e-02 6.254000e-01 1.100600e+00 3.906000e-01 8.229000e-01 6.111000e-01 1.120500e+00 4.252000e-01 7.403000e-01 6.368000e-01 1.083800e+00 4.206000e-01 4.015000e-01
+ 6.356000e-01 4.044000e-01 8.279000e-01 8.315000e-01 9.620000e-01 9.261000e-01 8.210000e-01 8.397000e-01 1.001200e+00 9.082000e-01 8.058000e-01 8.573000e-01 4.849000e-01 4.709000e-01
+ 1.061500e+00 5.180000e-02 7.158000e-01 1.021300e+00 6.573000e-01 4.111000e-01 7.008000e-01 1.036400e+00 5.019000e-01 5.624000e-01 7.263000e-01 1.009700e+00 4.987000e-01 2.754000e-01
+ 1.755000e-01 3.300000e-01 3.664000e-01 4.088000e-01 3.820000e-01 5.115000e-01 3.903000e-01 3.836000e-01 4.519000e-01 4.405000e-01 2.869000e-01 5.086000e-01 1.966000e-01 2.555000e-01
+ 2.700000e-02 1.430000e-02 3.190000e-02 3.198000e-01 3.130000e-02 4.564000e-01 -1.280000e-02 1.590000e-02 -6.610000e-02 7.780000e-02 -8.400000e-03 8.100000e-03 1.590000e-02 1.756000e-01
+ 4.663000e-01 -4.600000e-03 7.408000e-01 4.170000e-02 9.221000e-01 -2.920000e-02 1.735200e+00 -1.600000e-03 7.200000e-02 -8.600000e-02 3.510000e-02 -4.170000e-02 4.131000e-01 3.500000e-02
+ 3.971000e-01 1.871000e-01 6.050000e-01 3.133000e-01 6.799000e-01 3.637000e-01 5.564000e-01 3.703000e-01 7.048000e-01 3.569000e-01 5.925000e-01 3.298000e-01 3.250000e-01 2.095000e-01
+ 1.881000e-01 2.852000e-01 4.351000e-01 3.640000e-01 5.826000e-01 3.299000e-01 1.650900e+00 9.930000e-02 -7.760000e-02 9.370000e-02 3.610000e-02 -4.430000e-02 2.443000e-01 2.118000e-01
+ 7.071000e-01 4.006000e-01 3.163000e-01 1.421300e+00 2.512000e-01 1.208400e+00 3.451000e-01 1.385900e+00 3.214000e-01 1.050100e+00 3.445000e-01 1.383400e+00 2.696000e-01 6.342000e-01
+ 6.183000e-01 4.862000e-01 8.977000e-01 8.166000e-01 1.012400e+00 8.138000e-01 1.728200e+00 4.700000e-03 2.084100e+00 -9.930000e-02 -2.800000e-03 1.733200e+00 5.319000e-01 4.307000e-01
+ 7.480000e-01 2.723000e-01 1.088900e+00 5.234000e-01 1.335700e+00 4.807000e-01 1.197500e+00 3.954000e-01 1.335600e+00 5.135000e-01 1.092100e+00 5.162000e-01 6.428000e-01 2.840000e-01
+ 1.220000e-02 4.463000e-01 3.817000e-01 3.752000e-01 4.660000e-01 4.161000e-01 -3.900000e-02 1.775400e+00 2.450000e-02 -2.800000e-02 -2.000000e-02 2.500000e-02 1.909000e-01 2.436000e-01
+ 4.191000e-01 7.018000e-01 7.914000e-01 6.675000e-01 1.036300e+00 5.181000e-01 1.751200e+00 -2.240000e-02 -1.037000e-01 2.116700e+00 0.000000e+00 1.728200e+00 4.685000e-01 3.836000e-01
+ 5.673000e-01 5.564000e-01 3.051000e-01 8.024000e-01 2.870000e-01 5.490000e-01 3.614000e-01 7.325000e-01 2.592000e-01 5.638000e-01 3.773000e-01 7.175000e-01 2.555000e-01 3.958000e-01
+ 5.016000e-01 1.766000e-01 7.704000e-01 2.323000e-01 9.845000e-01 1.148000e-01 1.770000e+00 -4.560000e-02 -1.452000e-01 2.174300e+00 2.120000e-02 -2.570000e-02 4.619000e-01 1.186000e-01
+ 3.840000e-02 4.844000e-01 2.104000e-01 6.335000e-01 3.165000e-01 6.526000e-01 7.100000e-03 1.721900e+00 2.400000e-02 -3.070000e-02 -3.000000e-04 -1.300000e-03 1.120000e-01 3.751000e-01
+ 6.022000e-01 4.851000e-01 9.271000e-01 4.647000e-01 1.181800e+00 3.103000e-01 1.728700e+00 6.000000e-04 2.143800e+00 -1.692000e-01 2.100000e-02 1.706200e+00 6.078000e-01 2.020000e-01
+ 4.198000e-01 1.360000e-01 7.111000e-01 1.498000e-01 7.691000e-01 2.240000e-01 7.126000e-01 1.501000e-01 8.195000e-01 1.747000e-01 7.925000e-01 5.360000e-02 4.255000e-01 6.960000e-02
+ 4.734000e-01 3.226000e-01 7.321000e-01 5.214000e-01 6.302000e-01 8.344000e-01 7.167000e-01 5.399000e-01 8.025000e-01 6.486000e-01 7.317000e-01 5.186000e-01 3.959000e-01 3.324000e-01
+ 1.976000e-01 2.874000e-01 5.407000e-01 2.456000e-01 6.124000e-01 3.106000e-01 1.765600e+00 -4.640000e-02 -8.500000e-03 1.090000e-02 -3.050000e-02 3.470000e-02 2.834000e-01 1.711000e-01
+ 2.472000e-01 3.830000e-01 4.762000e-01 4.897000e-01 5.658000e-01 5.331000e-01 4.445000e-01 5.271000e-01 4.262000e-01 7.066000e-01 5.670000e-01 3.801000e-01 2.861000e-01 2.700000e-01
+ 5.640000e-02 8.770000e-01 4.094000e-01 8.304000e-01 4.598000e-01 9.081000e-01 1.685500e+00 5.740000e-02 -1.160000e-02 2.014600e+00 3.930000e-02 -4.600000e-02 2.180000e-01 5.141000e-01
+ 5.933000e-01 5.124000e-01 3.009000e-01 4.766000e-01 2.757000e-01 3.484000e-01 2.430000e-01 5.403000e-01 2.414000e-01 3.768000e-01 3.809000e-01 3.783000e-01 2.467000e-01 2.736000e-01
+ 4.738000e-01 1.604000e-01 6.897000e-01 3.143000e-01 7.454000e-01 4.065000e-01 7.160000e-01 2.829000e-01 7.983000e-01 3.611000e-01 6.483000e-01 3.632000e-01 4.115000e-01 1.662000e-01
+ 3.263000e-01 2.228000e-01 5.195000e-01 3.371000e-01 6.499000e-01 3.145000e-01 6.013000e-01 2.391000e-01 5.882000e-01 3.997000e-01 4.504000e-01 4.183000e-01 2.802000e-01 2.177000e-01
+ 2.952000e-01 3.011000e-01 3.907000e-01 5.610000e-01 5.623000e-01 4.951000e-01 3.930000e-01 5.548000e-01 5.071000e-01 5.777000e-01 3.762000e-01 5.773000e-01 2.368000e-01 3.101000e-01
+ 8.571000e-01 2.373000e-01 3.476000e-01 9.596000e-01 2.949000e-01 6.430000e-01 3.605000e-01 9.372000e-01 2.396000e-01 6.787000e-01 4.688000e-01 8.115000e-01 2.814000e-01 4.219000e-01
+-8.300000e-02 7.175000e-01 2.840000e-02 8.961000e-01 4.980000e-02 1.014500e+00 -5.960000e-02 1.804600e+00 -2.070000e-02 2.025200e+00 1.300000e-03 -1.400000e-03 2.410000e-02 5.176000e-01
+ 4.093000e-01 4.796000e-01 7.052000e-01 4.992000e-01 8.137000e-01 5.116000e-01 1.559200e+00 2.063000e-01 -9.400000e-03 2.012900e+00 -4.380000e-02 5.310000e-02 3.694000e-01 3.477000e-01
+-4.070000e-02 3.429000e-01 1.610000e-01 4.522000e-01 3.372000e-01 3.839000e-01 -1.471000e-01 1.905600e+00 2.170000e-02 -2.650000e-02 1.330000e-02 -1.820000e-02 1.011000e-01 2.359000e-01
+ 4.655000e-01 3.234000e-01 6.666000e-01 5.896000e-01 7.814000e-01 6.414000e-01 7.481000e-01 4.871000e-01 8.391000e-01 5.939000e-01 7.590000e-01 4.738000e-01 4.179000e-01 2.995000e-01
+ 3.631000e-01 3.763000e-01 5.393000e-01 6.265000e-01 6.059000e-01 7.227000e-01 5.750000e-01 5.816000e-01 5.516000e-01 8.031000e-01 5.457000e-01 6.142000e-01 3.099000e-01 3.612000e-01
+ 4.637000e-01 4.033000e-01 7.539000e-01 5.969000e-01 7.829000e-01 7.668000e-01 6.929000e-01 6.707000e-01 9.046000e-01 6.521000e-01 7.372000e-01 6.145000e-01 4.322000e-01 3.487000e-01
+ 1.090400e+00 1.660000e-02 6.835000e-01 4.016000e-01 5.539000e-01 2.774000e-01 6.936000e-01 3.948000e-01 5.649000e-01 2.433000e-01 7.215000e-01 3.609000e-01 4.784000e-01 1.675000e-01
+ 1.758000e-01 4.663000e-01 4.616000e-01 5.046000e-01 5.843000e-01 4.959000e-01 1.676900e+00 5.550000e-02 1.330000e-02 1.984500e+00 2.270000e-02 -2.750000e-02 2.619000e-01 2.967000e-01
+ 1.214600e+00 -1.319000e-01 5.935000e-01 5.697000e-01 5.128000e-01 3.593000e-01 5.345000e-01 6.402000e-01 4.997000e-01 3.476000e-01 5.162000e-01 6.670000e-01 4.160000e-01 2.552000e-01
+ 1.620000e-02 -1.880000e-02 2.976000e-01 1.450000e-02 4.584000e-01 -3.510000e-02 -5.870000e-02 6.980000e-02 9.900000e-03 -9.800000e-03 -5.190000e-02 6.120000e-02 1.608000e-01 6.200000e-03
+ 9.487000e-01 1.556000e-01 4.250000e-01 9.013000e-01 2.160000e-01 7.500000e-01 5.109000e-01 8.025000e-01 3.473000e-01 5.742000e-01 4.527000e-01 8.721000e-01 2.995000e-01 4.109000e-01
+ 4.073000e-01 6.996000e-01 6.978000e-01 8.732000e-01 8.389000e-01 8.391000e-01 1.652000e+00 9.270000e-02 -9.850000e-02 2.120300e+00 4.640000e-02 1.671500e+00 4.081000e-01 4.970000e-01
+ 1.179000e-01 3.772000e-01 1.543000e-01 6.233000e-01 2.983000e-01 5.636000e-01 2.670000e-01 4.901000e-01 2.800000e-01 6.010000e-01 1.791000e-01 5.917000e-01 1.150000e-01 3.288000e-01
+ 3.808000e-01 3.059000e-01 7.125000e-01 2.866000e-01 8.360000e-01 2.762000e-01 1.762600e+00 -4.020000e-02 -1.060000e-02 2.014400e+00 -2.190000e-02 2.570000e-02 3.894000e-01 1.950000e-01
+ 1.100700e+00 6.400000e-03 6.214000e-01 1.107200e+00 5.279000e-01 5.033000e-01 6.244000e-01 1.102300e+00 4.872000e-01 5.243000e-01 4.857000e-01 1.269100e+00 4.358000e-01 3.183000e-01
+ 4.452000e-01 4.062000e-01 7.953000e-01 5.247000e-01 7.304000e-01 8.046000e-01 7.100000e-01 6.177000e-01 8.594000e-01 6.745000e-01 7.237000e-01 6.055000e-01 4.072000e-01 3.626000e-01
+-3.360000e-02 7.815000e-01 3.460000e-01 6.911000e-01 3.249000e-01 8.536000e-01 -7.280000e-02 1.825300e+00 1.340000e-02 1.986900e+00 1.620000e-02 -2.070000e-02 1.240000e-01 4.968000e-01
+ 4.713000e-01 4.386000e-01 7.293000e-01 6.936000e-01 7.792000e-01 8.545000e-01 6.979000e-01 7.343000e-01 8.918000e-01 7.419000e-01 6.975000e-01 7.380000e-01 4.071000e-01 4.185000e-01
+ 4.175000e-01 3.384000e-01 7.303000e-01 4.443000e-01 7.458000e-01 6.023000e-01 6.859000e-01 4.919000e-01 8.668000e-01 4.794000e-01 7.331000e-01 4.360000e-01 4.239000e-01 2.520000e-01
+ 5.340000e-02 9.828000e-01 2.123000e-01 1.128600e+00 2.351000e-01 1.247100e+00 -7.900000e-02 1.824500e+00 -3.620000e-02 2.039300e+00 -1.320000e-02 1.450000e-02 8.350000e-02 7.143000e-01
+ 3.388000e-01 3.895000e-01 4.675000e-01 6.855000e-01 4.922000e-01 8.274000e-01 5.061000e-01 6.373000e-01 6.460000e-01 6.687000e-01 4.679000e-01 6.876000e-01 2.774000e-01 3.879000e-01
+ 3.247000e-01 5.262000e-01 6.185000e-01 5.524000e-01 7.928000e-01 4.811000e-01 1.705800e+00 3.450000e-02 -1.000000e-04 1.999800e+00 -2.430000e-02 2.690000e-02 3.642000e-01 3.209000e-01
+ 4.667000e-01 2.944000e-01 8.114000e-01 3.655000e-01 8.536000e-01 4.975000e-01 7.835000e-01 3.983000e-01 9.240000e-01 4.428000e-01 7.512000e-01 4.366000e-01 4.402000e-01 2.459000e-01
+ 1.345000e-01 6.435000e-01 3.909000e-01 7.931000e-01 5.475000e-01 7.823000e-01 3.952000e-01 7.846000e-01 3.832000e-01 9.931000e-01 3.469000e-01 8.406000e-01 2.234000e-01 4.592000e-01
+ 1.940000e-01 4.096000e-01 2.225000e-01 7.397000e-01 2.921000e-01 7.992000e-01 3.331000e-01 6.114000e-01 3.428000e-01 7.537000e-01 2.425000e-01 7.173000e-01 1.560000e-01 3.954000e-01
+ 5.959000e-01 1.939000e-01 9.863000e-01 2.432000e-01 1.230900e+00 1.453000e-01 1.001400e+00 2.181000e-01 1.243900e+00 1.572000e-01 9.501000e-01 2.823000e-01 5.876000e-01 1.168000e-01
+ 3.336000e-01 3.599000e-01 5.074000e-01 5.793000e-01 6.561000e-01 5.657000e-01 6.266000e-01 4.343000e-01 6.604000e-01 5.796000e-01 6.106000e-01 4.549000e-01 3.467000e-01 2.701000e-01
+ 4.059000e-01 3.798000e-01 7.394000e-01 3.555000e-01 8.615000e-01 3.500000e-01 1.760300e+00 -3.010000e-02 -1.800000e-02 2.019900e+00 -1.730000e-02 2.140000e-02 4.065000e-01 2.399000e-01
+ 3.310000e-01 6.894000e-01 7.105000e-01 6.104000e-01 8.226000e-01 6.172000e-01 1.725000e+00 9.400000e-03 1.055000e-01 1.875200e+00 3.200000e-03 -3.000000e-03 4.023000e-01 3.763000e-01
+ 7.199000e-01 3.784000e-01 2.954000e-01 7.870000e-01 2.970000e-01 5.207000e-01 2.539000e-01 8.341000e-01 2.433000e-01 5.660000e-01 3.386000e-01 7.365000e-01 2.611000e-01 3.805000e-01
+ 3.351000e-01 4.388000e-01 4.206000e-01 8.098000e-01 5.920000e-01 7.924000e-01 4.709000e-01 7.497000e-01 6.010000e-01 8.065000e-01 4.283000e-01 8.027000e-01 2.862000e-01 4.179000e-01
+ 2.682000e-01 2.294000e-01 5.970000e-01 1.434000e-01 7.460000e-01 8.930000e-02 5.686000e-01 1.835000e-01 6.593000e-01 2.042000e-01 6.265000e-01 1.115000e-01 3.488000e-01 8.000000e-02
+ 1.968000e-01 5.983000e-01 4.334000e-01 6.884000e-01 6.950000e-01 5.214000e-01 1.668500e+00 7.710000e-02 -1.500000e-02 2.017100e+00 -9.400000e-03 1.200000e-02 2.899000e-01 3.599000e-01
+ 2.743000e-01 2.707000e-01 5.182000e-01 3.226000e-01 6.576000e-01 2.817000e-01 4.334000e-01 4.222000e-01 5.786000e-01 3.969000e-01 5.792000e-01 2.490000e-01 3.026000e-01 1.813000e-01
+ 1.044000e-01 8.482000e-01 2.786000e-01 1.196100e+00 3.187000e-01 1.361100e+00 2.881000e-01 1.186100e+00 2.551000e-01 1.462000e+00 2.166000e-01 1.271600e+00 1.215000e-01 7.374000e-01
+ 7.940000e-02 7.657000e-01 3.944000e-01 7.673000e-01 4.722000e-01 8.130000e-01 1.623500e+00 1.296000e-01 1.566000e-01 1.815600e+00 8.910000e-02 -1.058000e-01 2.025000e-01 4.817000e-01
+ 5.802000e-01 5.219000e-01 2.736000e-01 3.596000e-01 3.377000e-01 1.663000e-01 3.450000e-01 2.768000e-01 2.607000e-01 2.504000e-01 2.996000e-01 3.324000e-01 2.310000e-01 2.168000e-01
+ 3.435000e-01 7.501000e-01 1.504000e-01 4.932000e-01 6.410000e-02 4.826000e-01 2.269000e-01 4.038000e-01 1.557000e-01 3.627000e-01 6.050000e-02 6.040000e-01 1.334000e-01 3.200000e-01
+ 2.296000e-01 8.800000e-02 3.187000e-01 1.855000e-01 3.660000e-01 2.098000e-01 2.541000e-01 2.652000e-01 3.659000e-01 2.184000e-01 1.360000e-01 4.045000e-01 1.647000e-01 1.304000e-01
+ 3.472000e-01 5.533000e-01 7.213000e-01 6.622000e-01 7.495000e-01 8.366000e-01 6.911000e-01 7.010000e-01 7.428000e-01 8.703000e-01 6.704000e-01 7.270000e-01 3.956000e-01 4.075000e-01
+ 1.127300e+00 -2.910000e-02 5.484000e-01 3.926000e-01 4.000000e-01 3.540000e-01 4.938000e-01 4.571000e-01 3.574000e-01 3.831000e-01 3.600000e-01 6.188000e-01 3.658000e-01 2.323000e-01
+ 3.116000e-01 2.006000e-01 6.378000e-01 1.844000e-01 8.251000e-01 1.021000e-01 1.731600e+00 -2.000000e-03 -5.400000e-03 5.500000e-03 6.160000e-02 -7.480000e-02 3.810000e-01 8.600000e-02
+ 5.224000e-01 9.700000e-03 7.253000e-01 1.418000e-01 8.516000e-01 1.318000e-01 1.666600e+00 7.340000e-02 1.910000e-02 -1.910000e-02 -1.140000e-02 1.270000e-02 4.417000e-01 5.310000e-02
+ 5.163000e-01 2.570000e-01 7.918000e-01 4.214000e-01 9.877000e-01 3.764000e-01 8.280000e-01 3.769000e-01 9.526000e-01 4.407000e-01 8.246000e-01 3.858000e-01 4.710000e-01 2.260000e-01
+ 5.571000e-01 5.544000e-01 2.239000e-01 9.428000e-01 2.880000e-01 5.743000e-01 3.653000e-01 7.806000e-01 2.938000e-01 5.459000e-01 3.241000e-01 8.265000e-01 2.381000e-01 4.292000e-01
+ 2.273000e-01 4.148000e-01 3.511000e-01 6.521000e-01 3.822000e-01 7.671000e-01 4.646000e-01 5.182000e-01 4.037000e-01 7.567000e-01 3.498000e-01 6.586000e-01 2.074000e-01 3.724000e-01
+ 4.839000e-01 4.570000e-02 1.840000e-01 1.079000e-01 2.019000e-01 4.250000e-02 3.259000e-01 -6.090000e-02 2.704000e-01 -4.380000e-02 2.482000e-01 3.690000e-02 2.045000e-01 1.880000e-02
+ 1.147700e+00 -5.040000e-02 1.158300e+00 1.214000e-01 9.293000e-01 -1.200000e-02 1.340100e+00 -9.310000e-02 8.479000e-01 5.720000e-02 1.220500e+00 5.150000e-02 6.836000e-01 1.350000e-02
+ 6.989000e-01 3.994000e-01 1.083100e+00 3.312000e-01 1.074000e+00 4.911000e-01 1.831800e+00 -1.164000e-01 2.058700e+00 -6.400000e-02 3.980000e-02 1.682900e+00 5.699000e-01 2.715000e-01
+ 3.769000e-01 4.515000e-01 1.789000e-01 1.933000e-01 1.166000e-01 2.087000e-01 1.752000e-01 1.987000e-01 1.617000e-01 1.490000e-01 1.450000e-01 2.362000e-01 1.476000e-01 1.407000e-01
+ 4.429000e-01 2.770000e-01 6.559000e-01 4.770000e-01 7.533000e-01 5.333000e-01 6.636000e-01 4.679000e-01 9.272000e-01 3.484000e-01 6.942000e-01 4.315000e-01 4.358000e-01 2.080000e-01
+ 2.678000e-01 5.396000e-01 5.706000e-01 5.481000e-01 7.094000e-01 5.272000e-01 1.753200e+00 -2.780000e-02 1.525000e-01 1.819500e+00 -8.100000e-03 9.300000e-03 3.374000e-01 3.192000e-01
+ 5.565000e-01 2.985000e-01 9.732000e-01 1.751000e-01 1.013600e+00 2.741000e-01 1.663900e+00 7.960000e-02 1.972300e+00 3.250000e-02 -3.070000e-02 3.620000e-02 5.264000e-01 1.595000e-01
+ 7.532000e-01 3.612000e-01 3.635000e-01 1.018900e+00 3.685000e-01 5.868000e-01 3.342000e-01 1.066600e+00 2.891000e-01 6.552000e-01 3.743000e-01 1.018400e+00 2.811000e-01 4.401000e-01
+ 1.108600e+00 -5.300000e-03 4.635000e-01 1.283100e+00 4.084000e-01 1.442300e+00 4.793000e-01 1.265200e+00 4.490000e-01 1.193800e+00 6.262000e-01 1.087300e+00 3.890000e-01 5.791000e-01
+ 4.608000e-01 2.314000e-01 8.328000e-01 2.275000e-01 8.639000e-01 3.633000e-01 8.046000e-01 2.602000e-01 8.485000e-01 3.997000e-01 7.342000e-01 3.473000e-01 4.069000e-01 2.202000e-01
+ 4.410000e-02 5.566000e-01 2.636000e-01 6.394000e-01 3.755000e-01 6.360000e-01 2.398000e-01 6.638000e-01 2.537000e-01 7.974000e-01 2.936000e-01 6.030000e-01 1.496000e-01 3.718000e-01
+ 5.005000e-01 3.034000e-01 7.548000e-01 5.121000e-01 1.041600e+00 3.657000e-01 8.597000e-01 3.826000e-01 1.028000e+00 4.066000e-01 7.794000e-01 4.771000e-01 4.800000e-01 2.417000e-01
+ 3.198000e-01 7.901000e-01 1.934000e-01 7.084000e-01 1.126000e-01 6.119000e-01 1.950000e-01 7.055000e-01 1.826000e-01 5.124000e-01 2.665000e-01 6.217000e-01 1.639000e-01 4.130000e-01
+ 5.197000e-01 2.097000e-01 8.513000e-01 1.889000e-01 9.604000e-01 2.034000e-01 1.725800e+00 3.100000e-03 -2.900000e-02 2.034500e+00 -4.180000e-02 4.840000e-02 4.906000e-01 1.207000e-01
+ 2.273000e-01 7.301000e-01 3.601000e-01 1.140200e+00 3.853000e-01 1.328900e+00 3.072000e-01 1.205700e+00 4.551000e-01 1.270000e+00 3.043000e-01 1.206400e+00 2.025000e-01 6.645000e-01
+-4.580000e-02 5.081000e-01 -4.810000e-02 8.260000e-01 -1.860000e-02 9.120000e-01 4.690000e-02 1.675900e+00 -5.220000e-02 6.230000e-02 -4.100000e-03 6.400000e-03 2.600000e-03 4.344000e-01
+ 1.116200e+00 -1.530000e-02 5.599000e-01 5.398000e-01 3.945000e-01 4.545000e-01 5.338000e-01 5.660000e-01 4.524000e-01 3.615000e-01 5.239000e-01 5.846000e-01 3.863000e-01 2.650000e-01
+ 6.918000e-01 3.518000e-01 1.041500e+00 3.028000e-01 1.083500e+00 3.950000e-01 1.622800e+00 1.234000e-01 2.023100e+00 -2.900000e-02 6.140000e-02 -7.670000e-02 6.017000e-01 1.932000e-01
+ 7.017000e-01 3.974000e-01 1.022200e+00 5.651000e-01 1.199500e+00 5.025000e-01 1.701000e+00 3.670000e-02 2.096700e+00 -1.106000e-01 6.400000e-03 1.724200e+00 5.794000e-01 3.400000e-01
+ 3.025000e-01 4.500000e-01 6.046000e-01 4.666000e-01 7.225000e-01 4.639000e-01 1.802600e+00 -8.280000e-02 -4.240000e-02 2.049500e+00 -3.420000e-02 4.300000e-02 3.911000e-01 2.265000e-01
+ 2.405000e-01 4.124000e-01 3.764000e-01 6.427000e-01 4.999000e-01 6.475000e-01 4.117000e-01 5.978000e-01 4.728000e-01 6.944000e-01 4.349000e-01 5.751000e-01 2.347000e-01 3.495000e-01
+ 3.042000e-01 4.975000e-01 5.813000e-01 5.410000e-01 5.880000e-01 6.772000e-01 1.773000e+00 -4.970000e-02 4.010000e-02 1.949800e+00 5.640000e-02 -6.680000e-02 3.048000e-01 3.605000e-01
+ 5.140000e-02 6.044000e-01 1.610000e-01 8.481000e-01 1.999000e-01 9.469000e-01 1.133000e-01 9.042000e-01 1.455000e-01 1.025900e+00 1.506000e-01 8.629000e-01 8.570000e-02 4.987000e-01
+ 7.159000e-01 3.887000e-01 3.221000e-01 2.428000e-01 3.203000e-01 1.404000e-01 3.725000e-01 1.797000e-01 2.981000e-01 1.584000e-01 2.153000e-01 3.692000e-01 2.653000e-01 1.412000e-01
+ 1.038500e+00 8.070000e-02 6.368000e-01 2.733000e-01 5.443000e-01 1.728000e-01 5.774000e-01 3.499000e-01 5.029000e-01 2.067000e-01 6.043000e-01 3.120000e-01 4.286000e-01 1.561000e-01
+ 3.442000e-01 6.581000e-01 6.663000e-01 8.721000e-01 5.898000e-01 1.199100e+00 6.507000e-01 8.913000e-01 5.997000e-01 1.210200e+00 5.873000e-01 9.664000e-01 3.414000e-01 5.564000e-01
+ 4.650000e-02 1.068000e+00 1.044000e-01 4.719000e-01 1.267000e-01 3.475000e-01 2.050000e-02 5.726000e-01 7.500000e-03 4.792000e-01 5.820000e-02 5.241000e-01 4.720000e-02 3.740000e-01
+ 5.320000e-02 3.194000e-01 1.602000e-01 4.124000e-01 1.449000e-01 5.132000e-01 1.762000e-01 3.928000e-01 2.111000e-01 4.435000e-01 1.781000e-01 3.896000e-01 1.014000e-01 2.271000e-01
+ 1.134400e+00 -3.280000e-02 3.935000e-01 4.392000e-01 3.808000e-01 2.835000e-01 4.294000e-01 3.987000e-01 3.470000e-01 3.056000e-01 3.493000e-01 4.929000e-01 3.117000e-01 2.354000e-01
+ 3.821000e-01 6.817000e-01 6.292000e-01 7.613000e-01 8.703000e-01 6.141000e-01 1.673400e+00 6.200000e-02 5.200000e-03 1.990900e+00 -8.500000e-02 1.832600e+00 4.190000e-01 3.872000e-01
+ 3.013000e-01 6.641000e-01 5.217000e-01 9.826000e-01 5.665000e-01 1.154200e+00 6.320000e-01 8.539000e-01 7.991000e-01 9.070000e-01 5.886000e-01 9.031000e-01 3.344000e-01 5.291000e-01
+ 1.506000e-01 7.948000e-01 1.765000e-01 1.315600e+00 2.341000e-01 1.461900e+00 2.085000e-01 1.275200e+00 2.402000e-01 1.474500e+00 1.973000e-01 1.294000e+00 1.203000e-01 7.379000e-01
+ 3.685000e-01 6.324000e-01 7.840000e-01 5.156000e-01 8.288000e-01 6.059000e-01 1.723100e+00 1.340000e-02 -3.700000e-02 2.044700e+00 -1.219000e-01 1.448000e-01 4.090000e-01 3.624000e-01
+ 4.675000e-01 3.226000e-01 7.764000e-01 4.498000e-01 8.159000e-01 5.908000e-01 6.720000e-01 5.726000e-01 8.785000e-01 5.377000e-01 5.608000e-01 7.012000e-01 4.061000e-01 3.096000e-01
+ 1.032100e+00 8.920000e-02 7.001000e-01 1.020800e+00 4.296000e-01 1.249300e+00 5.637000e-01 1.175500e+00 5.356000e-01 1.006600e+00 5.957000e-01 1.141200e+00 4.473000e-01 5.009000e-01
+ 3.537000e-01 3.750000e-01 5.846000e-01 5.609000e-01 6.021000e-01 7.155000e-01 5.300000e-01 6.204000e-01 6.224000e-01 7.085000e-01 5.285000e-01 6.264000e-01 3.126000e-01 3.522000e-01
+ 1.028400e+00 5.230000e-02 3.442000e-01 1.396500e+00 2.839000e-01 1.006800e+00 3.878000e-01 1.345500e+00 4.144000e-01 8.010000e-01 3.794000e-01 1.354100e+00 2.956000e-01 5.589000e-01
+ 1.255500e+00 -1.783000e-01 9.172000e-01 1.170000e-02 6.671000e-01 7.930000e-02 8.712000e-01 6.710000e-02 6.812000e-01 4.210000e-02 9.521000e-01 -2.890000e-02 5.712000e-01 2.040000e-02
+ 3.895000e-01 2.033000e-01 5.606000e-01 3.758000e-01 7.402000e-01 3.101000e-01 5.242000e-01 4.181000e-01 7.393000e-01 3.271000e-01 5.231000e-01 4.183000e-01 3.386000e-01 1.988000e-01
+ 3.526000e-01 7.464000e-01 6.101000e-01 9.113000e-01 7.630000e-01 8.749000e-01 1.689900e+00 4.920000e-02 1.480000e-02 1.983400e+00 -1.200000e-02 1.745600e+00 3.587000e-01 5.248000e-01
+-1.300000e-03 6.041000e-01 4.347000e-01 4.496000e-01 4.004000e-01 6.332000e-01 2.100000e-03 1.727800e+00 -8.340000e-02 2.097300e+00 1.580000e-02 -1.760000e-02 1.446000e-01 3.869000e-01
+ 3.791000e-01 2.530000e-01 8.544000e-01 6.310000e-02 8.729000e-01 1.838000e-01 1.780600e+00 -5.310000e-02 7.710000e-02 1.912700e+00 -3.610000e-02 4.320000e-02 4.118000e-01 1.349000e-01
+ 1.102700e+00 1.900000e-03 4.964000e-01 5.009000e-01 3.975000e-01 3.810000e-01 5.711000e-01 4.126000e-01 3.578000e-01 4.157000e-01 6.194000e-01 3.533000e-01 3.989000e-01 2.126000e-01
+ 3.882000e-01 4.172000e-01 6.311000e-01 6.264000e-01 8.014000e-01 6.151000e-01 7.290000e-01 5.097000e-01 8.256000e-01 6.037000e-01 7.664000e-01 4.626000e-01 4.112000e-01 3.055000e-01
+ 1.168300e+00 -7.610000e-02 5.486000e-01 1.192800e+00 5.699000e-01 4.532000e-01 5.990000e-01 1.130500e+00 4.773000e-01 5.330000e-01 5.430000e-01 1.196900e+00 4.272000e-01 3.263000e-01
+ 1.946000e-01 8.463000e-01 3.935000e-01 9.886000e-01 5.201000e-01 9.817000e-01 1.797000e+00 -7.150000e-02 -9.460000e-02 2.110900e+00 3.840000e-02 1.683800e+00 2.347000e-01 5.724000e-01
+ 2.387000e-01 5.287000e-01 5.081000e-01 5.803000e-01 6.225000e-01 5.861000e-01 1.602700e+00 1.486000e-01 -3.100000e-03 2.002900e+00 -3.110000e-02 3.750000e-02 2.908000e-01 3.475000e-01
+ 3.340000e-02 8.543000e-01 9.320000e-02 1.295600e+00 1.707000e-01 1.390800e+00 3.800000e-02 1.361300e+00 1.285000e-01 1.471100e+00 7.590000e-02 1.317000e+00 5.550000e-02 7.455000e-01
+ 2.603000e-01 6.276000e-01 6.728000e-01 5.071000e-01 7.549000e-01 5.508000e-01 1.824600e+00 -1.058000e-01 -1.900000e-02 2.019000e+00 -7.000000e-04 4.000000e-04 3.338000e-01 3.718000e-01
+ 5.957000e-01 4.387000e-01 8.091000e-01 8.394000e-01 9.161000e-01 9.635000e-01 7.718000e-01 8.814000e-01 1.003700e+00 8.898000e-01 7.043000e-01 9.604000e-01 4.712000e-01 4.792000e-01
+ 1.171000e+00 -7.670000e-02 5.022000e-01 3.121000e-01 4.242000e-01 2.324000e-01 4.959000e-01 3.193000e-01 4.355000e-01 2.043000e-01 5.301000e-01 2.812000e-01 3.576000e-01 1.846000e-01
+ 1.121500e+00 -2.200000e-02 5.811000e-01 1.162100e+00 5.564000e-01 5.510000e-01 6.498000e-01 1.077300e+00 4.685000e-01 6.165000e-01 5.802000e-01 1.157100e+00 4.429000e-01 3.451000e-01
+ 1.084900e+00 2.450000e-02 3.846000e-01 1.362700e+00 4.707000e-01 7.384000e-01 4.256000e-01 1.313500e+00 4.017000e-01 7.778000e-01 4.385000e-01 1.299300e+00 3.552000e-01 4.773000e-01
+ 1.153600e+00 -5.470000e-02 4.680000e-01 3.564000e-01 3.592000e-01 3.113000e-01 3.693000e-01 4.764000e-01 3.756000e-01 2.744000e-01 4.279000e-01 4.029000e-01 3.265000e-01 2.203000e-01
+ 3.748000e-01 2.994000e-01 6.536000e-01 3.408000e-01 8.457000e-01 2.527000e-01 1.739800e+00 -9.200000e-03 -6.410000e-02 2.075800e+00 3.580000e-02 -4.420000e-02 4.033000e-01 1.715000e-01
+ 4.363000e-01 6.118000e-01 7.460000e-01 6.139000e-01 8.352000e-01 6.512000e-01 1.741400e+00 -1.040000e-02 -3.700000e-02 2.039400e+00 4.330000e-02 -5.150000e-02 4.164000e-01 3.860000e-01
+ 1.798000e-01 2.871000e-01 3.085000e-01 4.197000e-01 3.603000e-01 4.645000e-01 2.507000e-01 4.863000e-01 3.400000e-01 5.030000e-01 3.426000e-01 3.776000e-01 1.842000e-01 2.344000e-01
+ 6.403000e-01 5.420000e-02 1.025400e+00 6.470000e-02 1.219600e+00 9.200000e-03 9.108000e-01 1.963000e-01 1.133800e+00 1.292000e-01 9.212000e-01 1.857000e-01 5.768000e-01 5.450000e-02
+ 4.600000e-02 8.990000e-01 1.115000e-01 1.363600e+00 1.098000e-01 1.570600e+00 1.085000e-01 1.371400e+00 2.028000e-01 1.486500e+00 4.920000e-02 1.434900e+00 6.760000e-02 7.827000e-01
+ 3.339000e-01 7.763000e-01 7.458000e-01 7.458000e-01 7.757000e-01 8.486000e-01 1.733500e+00 7.000000e-04 -8.480000e-02 2.099900e+00 -2.780000e-02 1.761200e+00 3.877000e-01 4.879000e-01
+ 2.704000e-01 8.443000e-01 6.167000e-01 8.324000e-01 7.813000e-01 7.781000e-01 1.633800e+00 1.169000e-01 -2.190000e-02 2.026000e+00 -8.870000e-02 1.835800e+00 3.523000e-01 4.940000e-01
+ 3.663000e-01 5.339000e-01 4.716000e-01 9.602000e-01 5.079000e-01 1.127100e+00 5.363000e-01 8.867000e-01 5.595000e-01 1.094100e+00 5.606000e-01 8.596000e-01 3.032000e-01 5.188000e-01
+ 6.235000e-01 1.920000e-01 1.090900e+00 1.685000e-01 1.179400e+00 2.634000e-01 1.007500e+00 2.645000e-01 1.161100e+00 3.117000e-01 1.011000e+00 2.615000e-01 5.957000e-01 1.379000e-01
+-1.810000e-02 6.916000e-01 2.449000e-01 7.359000e-01 4.121000e-01 6.803000e-01 -6.350000e-02 1.805700e+00 7.400000e-02 1.915100e+00 2.490000e-02 -2.860000e-02 1.347000e-01 4.371000e-01
+ 5.528000e-01 5.532000e-01 3.238000e-01 7.494000e-01 2.123000e-01 6.204000e-01 2.494000e-01 8.322000e-01 2.191000e-01 5.926000e-01 2.836000e-01 7.916000e-01 2.339000e-01 4.084000e-01
+ 7.864000e-01 -2.360000e-02 1.092900e+00 1.320000e-01 1.351800e+00 2.660000e-02 1.164200e+00 4.800000e-02 1.409700e+00 -1.860000e-02 1.181300e+00 2.760000e-02 6.641000e-01 3.780000e-02
+ 4.138000e-01 5.768000e-01 7.693000e-01 5.243000e-01 9.692000e-01 4.379000e-01 1.803000e+00 -8.240000e-02 6.050000e-02 1.930600e+00 -1.340000e-02 1.410000e-02 4.108000e-01 3.600000e-01
+ 1.148100e+00 -5.410000e-02 7.842000e-01 2.590000e-01 6.844000e-01 1.110000e-01 8.148000e-01 2.220000e-01 7.089000e-01 6.410000e-02 8.862000e-01 1.437000e-01 5.566000e-01 7.150000e-02
+ 2.915000e-01 6.952000e-01 5.862000e-01 7.160000e-01 8.323000e-01 5.673000e-01 1.671200e+00 7.010000e-02 1.290000e-02 1.985300e+00 7.770000e-02 -9.110000e-02 3.706000e-01 3.907000e-01
+ 1.131700e+00 -3.170000e-02 9.419000e-01 7.896000e-01 7.905000e-01 4.900000e-01 9.017000e-01 8.375000e-01 7.738000e-01 4.528000e-01 9.162000e-01 8.146000e-01 5.810000e-01 2.747000e-01
+ 3.090000e-02 9.172000e-01 3.080000e-02 1.457100e+00 1.922000e-01 1.471700e+00 4.110000e-02 1.445200e+00 1.063000e-01 1.599500e+00 7.490000e-02 1.411300e+00 2.740000e-02 8.309000e-01
+ 6.018000e-01 2.622000e-01 8.873000e-01 2.966000e-01 1.037300e+00 2.592000e-01 1.817400e+00 -9.650000e-02 2.013200e+00 -1.730000e-02 9.150000e-02 -1.097000e-01 5.341000e-01 1.592000e-01
+ 3.616000e-01 5.395000e-01 7.610000e-01 4.352000e-01 8.018000e-01 5.294000e-01 1.749900e+00 -2.180000e-02 6.930000e-02 1.918400e+00 -5.740000e-02 6.910000e-02 4.071000e-01 3.045000e-01
+ 3.879000e-01 5.093000e-01 6.455000e-01 7.533000e-01 7.869000e-01 7.986000e-01 5.291000e-01 8.947000e-01 7.951000e-01 8.101000e-01 6.323000e-01 7.764000e-01 3.686000e-01 4.411000e-01
+ 8.884000e-01 2.158000e-01 1.187600e+00 5.403000e-01 1.283500e+00 5.980000e-01 1.680100e+00 5.840000e-02 2.025300e+00 -3.460000e-02 4.970000e-02 1.670900e+00 6.889000e-01 2.914000e-01
+ 1.108700e+00 2.100000e-03 1.015600e+00 7.153000e-01 7.320000e-01 1.086200e+00 9.141000e-01 8.361000e-01 8.051000e-01 8.140000e-01 9.557000e-01 7.831000e-01 6.136000e-01 3.523000e-01
+ 2.780000e-02 6.130000e-02 2.001000e-01 2.144000e-01 3.815000e-01 1.446000e-01 1.435000e-01 1.563000e+00 -2.340000e-02 2.930000e-02 -1.340000e-02 1.620000e-02 1.320000e-01 8.730000e-02
+ 1.968000e-01 8.896000e-01 4.827000e-01 9.247000e-01 6.167000e-01 9.017000e-01 1.671100e+00 6.720000e-02 -1.130000e-02 2.014000e+00 5.820000e-02 1.666900e+00 2.653000e-01 5.603000e-01
+ 5.201000e-01 5.841000e-01 3.171000e-01 7.668000e-01 3.882000e-01 4.175000e-01 2.912000e-01 8.033000e-01 1.717000e-01 6.541000e-01 2.294000e-01 8.757000e-01 2.216000e-01 4.260000e-01
+ 5.786000e-01 4.950000e-02 8.336000e-01 1.201000e-01 9.328000e-01 1.453000e-01 1.627900e+00 1.184000e-01 1.938700e+00 6.730000e-02 2.700000e-03 -2.800000e-03 5.267000e-01 1.920000e-02
+ 2.881000e-01 5.880000e-01 6.085000e-01 5.808000e-01 7.821000e-01 5.208000e-01 1.687500e+00 5.650000e-02 -1.156000e-01 2.135100e+00 -2.450000e-02 2.660000e-02 3.434000e-01 3.586000e-01
+ 3.163000e-01 4.248000e-01 6.331000e-01 4.197000e-01 7.968000e-01 3.696000e-01 1.673900e+00 6.850000e-02 4.840000e-02 1.946100e+00 1.170000e-02 -1.470000e-02 3.255000e-01 2.971000e-01
+ 1.184100e+00 -9.100000e-02 5.882000e-01 6.830000e-01 4.091000e-01 5.274000e-01 6.264000e-01 6.381000e-01 4.739000e-01 4.277000e-01 5.825000e-01 6.909000e-01 3.942000e-01 3.051000e-01
+ 2.379000e-01 4.085000e-01 3.863000e-01 6.242000e-01 3.834000e-01 7.741000e-01 3.794000e-01 6.333000e-01 3.210000e-01 8.707000e-01 3.651000e-01 6.482000e-01 2.093000e-01 3.762000e-01
+ 1.124500e+00 -2.220000e-02 3.990000e-01 1.027600e+00 2.620000e-01 7.330000e-01 3.240000e-01 1.115300e+00 3.763000e-01 5.714000e-01 4.534000e-01 9.653000e-01 3.213000e-01 4.047000e-01
+ 1.099400e+00 8.000000e-03 8.668000e-01 8.454000e-01 5.698000e-01 1.414000e+00 6.727000e-01 1.078300e+00 5.144000e-01 1.395700e+00 6.686000e-01 1.077000e+00 5.120000e-01 4.853000e-01
+ 3.698000e-01 1.077000e-01 5.224000e-01 2.341000e-01 5.438000e-01 3.333000e-01 5.149000e-01 2.459000e-01 5.892000e-01 2.902000e-01 5.628000e-01 1.885000e-01 3.137000e-01 1.224000e-01
+ 1.164700e+00 -6.970000e-02 3.553000e-01 1.395300e+00 3.349000e-01 1.135600e+00 4.702000e-01 1.253800e+00 3.487000e-01 1.043500e+00 4.655000e-01 1.262100e+00 3.359000e-01 5.706000e-01
+ 3.183000e-01 3.051000e-01 7.323000e-01 2.050000e-01 6.892000e-01 4.029000e-01 4.636000e-01 5.233000e-01 6.157000e-01 5.086000e-01 5.282000e-01 4.456000e-01 3.188000e-01 2.398000e-01
+ 3.521000e-01 3.844000e-01 5.984000e-01 5.504000e-01 6.438000e-01 6.688000e-01 6.586000e-01 4.784000e-01 6.964000e-01 6.264000e-01 5.053000e-01 6.583000e-01 3.198000e-01 3.473000e-01
+ 5.951000e-01 4.877000e-01 8.186000e-01 8.973000e-01 8.961000e-01 1.057700e+00 6.305000e-01 1.118100e+00 8.322000e-01 1.168700e+00 7.579000e-01 9.702000e-01 4.368000e-01 5.585000e-01
+ 3.389000e-01 -2.500000e-03 6.462000e-01 1.600000e-03 8.299000e-01 -7.180000e-02 1.751500e+00 -2.660000e-02 6.930000e-02 -8.170000e-02 -3.820000e-02 4.560000e-02 3.626000e-01 1.000000e-03
+ 5.361000e-01 6.900000e-03 8.957000e-01 -5.900000e-02 8.035000e-01 1.913000e-01 6.853000e-01 1.933000e-01 8.260000e-01 1.836000e-01 8.115000e-01 4.050000e-02 4.454000e-01 5.170000e-02
+ 1.890000e-02 9.513000e-01 1.228000e-01 1.177100e+00 3.517000e-01 1.045600e+00 6.800000e-03 1.724800e+00 -4.210000e-02 2.044000e+00 6.100000e-03 -7.700000e-03 1.020000e-01 6.578000e-01
+ 9.760000e-02 1.367000e-01 1.787000e-01 1.816000e-01 9.870000e-02 3.317000e-01 7.080000e-02 3.092000e-01 1.655000e-01 2.588000e-01 1.027000e-01 2.720000e-01 6.920000e-02 1.454000e-01
+ 8.847000e-01 -4.760000e-02 1.263500e+00 7.040000e-02 1.474600e+00 3.880000e-02 1.329900e+00 -7.300000e-03 1.572400e+00 -5.190000e-02 1.251700e+00 8.340000e-02 7.533000e-01 1.260000e-02
+ 4.102000e-01 6.910000e-01 7.255000e-01 8.216000e-01 8.329000e-01 8.367000e-01 1.705600e+00 3.830000e-02 6.300000e-02 1.926100e+00 -7.250000e-02 1.816300e+00 3.804000e-01 5.212000e-01
+ 1.200200e+00 -1.123000e-01 7.355000e-01 6.722000e-01 6.381000e-01 3.311000e-01 7.752000e-01 6.237000e-01 5.633000e-01 3.925000e-01 7.433000e-01 6.622000e-01 5.182000e-01 2.040000e-01
+ 1.303000e-01 5.144000e-01 2.051000e-01 8.057000e-01 2.318000e-01 9.195000e-01 1.189000e-01 9.103000e-01 2.451000e-01 9.204000e-01 6.870000e-02 9.648000e-01 9.940000e-02 4.873000e-01
+-1.940000e-02 1.128400e+00 9.260000e-02 8.642000e-01 -7.870000e-02 8.619000e-01 1.940000e-02 9.523000e-01 -3.220000e-02 7.856000e-01 1.277000e-01 8.247000e-01 2.400000e-03 6.047000e-01
+ 4.836000e-01 3.774000e-01 8.733000e-01 2.825000e-01 9.310000e-01 3.550000e-01 1.847400e+00 -1.350000e-01 -6.380000e-02 2.077500e+00 -3.200000e-03 5.300000e-03 4.776000e-01 2.108000e-01
+ 3.880000e-01 7.093000e-01 2.357000e-01 1.496200e+00 1.546000e-01 1.279100e+00 2.154000e-01 1.517400e+00 1.865000e-01 1.174800e+00 2.110000e-01 1.518400e+00 1.589000e-01 7.401000e-01
+ 9.033000e-01 1.708000e-01 1.453700e+00 2.186000e-01 1.625800e+00 2.814000e-01 1.394600e+00 2.859000e-01 1.638100e+00 3.015000e-01 1.507100e+00 1.582000e-01 8.207000e-01 1.479000e-01
+ 7.640000e-02 9.569000e-01 1.070000e-02 1.348700e+00 -2.890000e-02 1.525800e+00 -6.690000e-02 1.814900e+00 5.600000e-02 1.930900e+00 -7.500000e-03 8.600000e-03 1.070000e-02 7.913000e-01
+ 6.920000e-01 1.085000e-01 1.126700e+00 1.214000e-01 1.123300e+00 3.258000e-01 9.782000e-01 2.989000e-01 1.109100e+00 3.648000e-01 9.312000e-01 3.528000e-01 5.654000e-01 1.710000e-01
+ 1.140900e+00 -4.250000e-02 5.855000e-01 1.156300e+00 5.839000e-01 5.531000e-01 6.872000e-01 1.039200e+00 5.730000e-01 5.251000e-01 6.184000e-01 1.117100e+00 4.544000e-01 3.476000e-01
+ 2.265000e-01 6.849000e-01 5.588000e-01 6.636000e-01 5.984000e-01 7.636000e-01 1.767000e+00 -3.880000e-02 4.490000e-02 1.949400e+00 -5.390000e-02 6.500000e-02 3.104000e-01 4.117000e-01
+ 1.136600e+00 -3.860000e-02 7.026000e-01 6.384000e-01 4.141000e-01 5.617000e-01 6.621000e-01 6.825000e-01 6.124000e-01 3.018000e-01 7.418000e-01 5.917000e-01 4.552000e-01 2.588000e-01
+ 1.151400e+00 -5.220000e-02 7.812000e-01 9.582000e-01 6.172000e-01 8.571000e-01 8.638000e-01 8.509000e-01 6.028000e-01 8.008000e-01 8.650000e-01 8.518000e-01 5.288000e-01 3.810000e-01
+ 3.512000e-01 7.155000e-01 4.879000e-01 1.195400e+00 6.452000e-01 1.257600e+00 5.178000e-01 1.157700e+00 5.995000e-01 1.341400e+00 4.382000e-01 1.253600e+00 2.947000e-01 6.734000e-01
+ 3.928000e-01 2.647000e-01 5.870000e-01 4.481000e-01 6.574000e-01 5.253000e-01 6.692000e-01 3.489000e-01 7.849000e-01 3.914000e-01 6.414000e-01 3.851000e-01 3.603000e-01 2.330000e-01
+ 9.810000e-02 5.790000e-01 2.610000e-02 1.055300e+00 1.204000e-01 1.094800e+00 1.193000e-01 9.473000e-01 1.539000e-01 1.071500e+00 1.487000e-01 9.104000e-01 5.580000e-02 5.622000e-01
+ 3.960000e-01 5.710000e-01 6.352000e-01 8.757000e-01 6.999000e-01 1.032200e+00 6.124000e-01 9.060000e-01 7.728000e-01 9.632000e-01 6.477000e-01 8.664000e-01 3.643000e-01 5.100000e-01
+ 1.056200e+00 5.920000e-02 5.754000e-01 5.725000e-01 3.995000e-01 4.736000e-01 4.606000e-01 7.063000e-01 4.667000e-01 3.719000e-01 5.426000e-01 6.059000e-01 3.744000e-01 2.928000e-01
+ 1.544000e-01 3.905000e-01 1.708000e-01 6.948000e-01 1.166000e-01 8.799000e-01 1.731000e-01 6.905000e-01 2.379000e-01 7.522000e-01 1.968000e-01 6.632000e-01 1.038000e-01 3.948000e-01
+ 7.474000e-01 3.670000e-01 4.144000e-01 1.308300e+00 3.011000e-01 7.628000e-01 4.681000e-01 1.247800e+00 2.852000e-01 7.465000e-01 2.409000e-01 1.516200e+00 2.805000e-01 4.849000e-01
+ 5.245000e-01 1.796000e-01 8.253000e-01 2.766000e-01 9.779000e-01 2.720000e-01 8.928000e-01 1.989000e-01 1.056100e+00 1.957000e-01 8.944000e-01 1.973000e-01 4.657000e-01 1.730000e-01
+ 2.587000e-01 3.300000e-01 3.558000e-01 5.741000e-01 4.228000e-01 6.357000e-01 3.226000e-01 6.190000e-01 4.366000e-01 6.308000e-01 3.088000e-01 6.295000e-01 2.096000e-01 3.275000e-01
+ 4.381000e-01 3.545000e-01 7.058000e-01 5.334000e-01 7.713000e-01 6.482000e-01 6.476000e-01 6.045000e-01 8.761000e-01 5.459000e-01 7.537000e-01 4.779000e-01 4.071000e-01 3.084000e-01
+ 1.175000e-01 6.417000e-01 2.160000e-02 3.414000e-01 -4.500000e-03 3.157000e-01 4.350000e-02 3.126000e-01 8.800000e-02 2.014000e-01 8.650000e-02 2.635000e-01 5.010000e-02 2.254000e-01
+ 1.150900e+00 -5.330000e-02 8.148000e-01 1.712000e-01 6.775000e-01 8.840000e-02 8.537000e-01 1.243000e-01 6.137000e-01 1.453000e-01 7.788000e-01 2.154000e-01 5.315000e-01 8.070000e-02
+-5.630000e-02 9.848000e-01 1.351000e-01 1.084000e+00 6.650000e-02 1.297100e+00 8.910000e-02 1.623000e+00 6.430000e-02 1.925100e+00 4.000000e-04 -6.000000e-04 4.630000e-02 6.800000e-01
+ 1.074000e+00 4.070000e-02 4.828000e-01 1.259400e+00 3.677000e-01 1.607300e+00 4.678000e-01 1.268900e+00 5.239000e-01 1.195600e+00 4.787000e-01 1.261600e+00 3.748000e-01 6.080000e-01
+ 3.009000e-01 2.211000e-01 5.387000e-01 3.073000e-01 6.518000e-01 3.179000e-01 1.888000e+00 -1.786000e-01 1.250000e-02 -1.310000e-02 4.310000e-02 -5.170000e-02 3.051000e-01 1.811000e-01
+ 4.833000e-01 5.855000e-01 6.725000e-01 1.015700e+00 8.943000e-01 1.002800e+00 7.816000e-01 8.851000e-01 9.070000e-01 1.016200e+00 7.665000e-01 9.063000e-01 4.491000e-01 5.144000e-01
+ 2.202000e-01 1.536000e-01 3.610000e-01 2.209000e-01 5.806000e-01 5.000000e-02 3.611000e-01 2.241000e-01 4.184000e-01 2.550000e-01 4.442000e-01 1.251000e-01 2.334000e-01 9.850000e-02
+ 5.666000e-01 5.338000e-01 2.767000e-01 1.301500e+00 2.398000e-01 7.728000e-01 2.772000e-01 1.298300e+00 2.390000e-01 7.463000e-01 3.174000e-01 1.256700e+00 2.205000e-01 5.236000e-01
+ 3.376000e-01 7.419000e-01 4.848000e-01 1.221000e+00 6.335000e-01 1.291000e+00 5.921000e-01 1.088100e+00 6.010000e-01 1.356400e+00 4.958000e-01 1.201200e+00 3.003000e-01 6.781000e-01
+ 9.463000e-01 2.020000e-02 1.380900e+00 1.522000e-01 1.676300e+00 5.780000e-02 1.385100e+00 1.532000e-01 1.648500e+00 1.135000e-01 1.339300e+00 2.017000e-01 8.296000e-01 5.060000e-02
+ 3.907000e-01 6.294000e-01 5.879000e-01 7.718000e-01 8.172000e-01 6.377000e-01 1.697300e+00 4.520000e-02 1.263000e-01 1.845900e+00 -1.900000e-03 1.300000e-03 4.017000e-01 3.842000e-01
+ 1.724000e-01 3.817000e-01 1.702000e-01 7.203000e-01 2.873000e-01 7.049000e-01 2.921000e-01 5.731000e-01 3.515000e-01 6.442000e-01 2.986000e-01 5.659000e-01 1.455000e-01 3.580000e-01
+ 1.312000e-01 7.093000e-01 3.948000e-01 7.673000e-01 5.272000e-01 7.554000e-01 1.743300e+00 -1.250000e-02 2.350000e-02 1.975500e+00 2.500000e-03 -3.900000e-03 2.266000e-01 4.576000e-01
+ 1.263000e-01 6.105000e-01 2.655000e-01 8.748000e-01 2.336000e-01 1.076800e+00 1.833000e-01 9.726000e-01 3.004000e-01 1.016500e+00 1.962000e-01 9.532000e-01 1.264000e-01 5.363000e-01
+ 6.043000e-01 4.914000e-01 1.090000e+00 5.961000e-01 1.056900e+00 8.966000e-01 1.027900e+00 6.694000e-01 1.212700e+00 7.549000e-01 1.065400e+00 6.278000e-01 6.046000e-01 3.749000e-01
+ 1.028400e+00 8.930000e-02 4.097000e-01 9.375000e-01 2.813000e-01 6.865000e-01 3.827000e-01 9.620000e-01 2.830000e-01 6.586000e-01 3.508000e-01 1.008600e+00 3.087000e-01 4.039000e-01
+-1.970000e-02 7.719000e-01 4.634000e-01 5.700000e-01 5.143000e-01 6.520000e-01 -2.580000e-02 1.762600e+00 -1.730000e-02 2.023200e+00 2.500000e-02 -2.650000e-02 1.832000e-01 4.379000e-01
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/olgd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/olgd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,181 @@
+function [net, options, errlog, pointlog] = olgd(net, options, x, t)
+%OLGD On-line gradient descent optimization.
+%
+% Description
+% [NET, OPTIONS, ERRLOG, POINTLOG] = OLGD(NET, OPTIONS, X, T) uses on-
+% line gradient descent to find a local minimum of the error function
+% for the network NET computed on the input data X and target values T.
+% A log of the error values after each cycle is (optionally) returned
+% in ERRLOG, and a log of the points visited is (optionally) returned
+% in POINTLOG. Because the gradient is computed on-line (i.e. after
+% each pattern) this can be quite inefficient in Matlab.
+%
+% The error function value at final weight vector is returned in
+% OPTIONS(8).
+%
+% The optional parameters have the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument ERRLOG, and the points visited in the
+% return argument POINTLOG. If OPTIONS(1) is set to 0, then only
+% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
+% displayed.
+%
+% OPTIONS(2) is the precision required for the value of X at the
+% solution. If the absolute difference between the values of X between
+% two successive steps is less than OPTIONS(2), then this condition is
+% satisfied.
+%
+% OPTIONS(3) is the precision required of the objective function at the
+% solution. If the absolute difference between the error functions
+% between two successive steps is less than OPTIONS(3), then this
+% condition is satisfied. Both this and the previous condition must be
+% satisfied for termination. Note that testing the function value at
+% each iteration roughly halves the speed of the algorithm.
+%
+% OPTIONS(5) determines whether the patterns are sampled randomly with
+% replacement. If it is 0 (the default), then patterns are sampled in
+% order.
+%
+% OPTIONS(6) determines if the learning rate decays. If it is 1 then
+% the learning rate decays at a rate of 1/T. If it is 0 (the default)
+% then the learning rate is constant.
+%
+% OPTIONS(9) should be set to 1 to check the user defined gradient
+% function.
+%
+% OPTIONS(10) returns the total number of function evaluations
+% (including those in any line searches).
+%
+% OPTIONS(11) returns the total number of gradient evaluations.
+%
+% OPTIONS(14) is the maximum number of iterations (passes through the
+% complete pattern set); default 100.
+%
+% OPTIONS(17) is the momentum; default 0.5.
+%
+% OPTIONS(18) is the learning rate; default 0.01.
+%
+% See also
+% GRADDESC
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set up the options.
+if length(options) < 18
+ error('Options vector too short')
+end
+
+if (options(14)) % Maximum number of epochs (passes through the data)
+ niters = options(14);
+else
+ niters = 100; % Default number of epochs
+end
+
+% Learning rate: must be positive
+if (options(18) > 0)
+ eta = options(18);
+else
+ eta = 0.01; % Default learning rate
+end
+% Save initial learning rate for annealing
+lr = eta;
+% Momentum term: allow zero momentum
+if (options(17) >= 0)
+ mu = options(17);
+else
+ mu = 0.5; % Default momentum
+end
+
+pakstr = [net.type, 'pak']; % Name of the function packing net weights into a vector
+unpakstr = [net.type, 'unpak']; % ... and its inverse
+
+% Extract initial weights from the network
+w = feval(pakstr, net);
+
+display = options(1);
+
+% Work out if we need to compute f at each iteration.
+% Needed if display results or if termination
+% criterion requires it.
+fcneval = (display | options(3));
+
+% Check gradients
+if (options(9))
+ feval('gradchek', w, 'neterr', 'netgrad', net, x, t);
+end
+
+dwold = zeros(1, length(w)); % Previous weight update, used for the momentum term
+fold = 0; % Must be initialised so that termination test can be performed
+ndata = size(x, 1); % Number of training patterns
+
+if fcneval
+ fnew = neterr(w, net, x, t); % Error over the full dataset
+ options(10) = options(10) + 1;
+ fold = fnew;
+end
+
+j = 1; % Epoch counter
+if nargout >= 3
+ errlog(j, :) = fnew; % NOTE(review): fnew is only assigned above when fcneval is true; with OPTIONS(1) == 0 and OPTIONS(3) == 0 this reference would fail - confirm
+ if nargout == 4
+ pointlog(j, :) = w;
+ end
+end
+
+% Main optimization loop.
+while j <= niters
+ wold = w; % Weights at start of epoch, for the termination test below
+ if options(5)
+ % Randomise order of pattern presentation: with replacement
+ pnum = ceil(rand(ndata, 1).*ndata);
+ else
+ pnum = 1:ndata; % Present patterns in their stored order
+ end
+ for k = 1:ndata
+ grad = netgrad(w, net, x(pnum(k),:), t(pnum(k),:)); % Gradient for a single pattern
+ if options(6)
+ % Let learning rate decrease as 1/t
+ lr = eta/((j-1)*ndata + k);
+ end
+ dw = mu*dwold - lr*grad; % Momentum term plus gradient step
+ w = w + dw;
+ dwold = dw;
+ end
+ options(11) = options(11) + 1; % Counts one gradient evaluation per epoch (covers ndata per-pattern gradients)
+ if fcneval
+ fold = fnew;
+ fnew = neterr(w, net, x, t); % Error on the full dataset after this epoch
+ options(10) = options(10) + 1;
+ end
+ if display
+ fprintf(1, 'Iteration %5d Error %11.8f\n', j, fnew);
+ end
+ j = j + 1;
+ if nargout >= 3
+ errlog(j) = fnew; % NOTE(review): indexed errlog(j) here but errlog(j, :) above - confirm intentional
+ if nargout == 4
+ pointlog(j, :) = w;
+ end
+ end
+ if (max(abs(w - wold)) < options(2) & abs(fnew - fold) < options(3)) % Both weight change and error change must be below tolerance
+ % Termination criteria are met
+ options(8) = fnew; % Return final error value
+ net = feval(unpakstr, net, w); % Write final weights back into the network
+ return;
+ end
+end
+
+if fcneval
+ options(8) = fnew;
+else
+ % Return error on entire dataset
+ options(8) = neterr(w, net, x, t);
+ options(10) = options(10) + 1;
+end
+if (options(1) >= 0)
+ disp(maxitmess); % Presumably the standard Netlab 'maximum iterations' message helper - confirm it is on the path
+end
+
+net = feval(unpakstr, net, w);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/pca.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/pca.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,42 @@
+function [PCcoeff, PCvec] = pca(data, N)
+%PCA Principal Components Analysis
+%
+% Description
+% PCCOEFF = PCA(DATA) returns the eigenvalues of the covariance
+% matrix of the dataset DATA, i.e. the variances of DATA along each
+% of the corresponding principal components.
+%
+% PCCOEFF = PCA(DATA, N) returns only the N largest eigenvalues.
+%
+% [PCCOEFF, PCVEC] = PCA(DATA) additionally returns the principal
+% component directions themselves; this is considerably more
+% computationally demanding than computing the eigenvalues alone.
+%
+% See also
+% EIGDEC, GTMINIT, PPCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Default: consider every dimension of the data.
+if nargin == 1
+ N = size(data, 2);
+end
+
+% Eigenvectors are only worth computing when a second output is wanted.
+evals_only = (nargout == 1);
+
+if N ~= round(N) | N < 1 | N > size(data, 2)
+ error('Number of PCs must be integer, >0, < dim');
+end
+
+% Sorted eigen-decomposition of the data covariance matrix.
+if evals_only
+ PCcoeff = eigdec(cov(data), N);
+else
+ [PCcoeff, PCvec] = eigdec(cov(data), N);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/plotmat.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/plotmat.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,44 @@
+function plotmat(matrix, textcolour, gridcolour, fontsize)
+%PLOTMAT Display a matrix.
+%
+% Description
+% PLOTMAT(MATRIX, TEXTCOLOUR, GRIDCOLOUR, FONTSIZE) draws MATRIX on
+% the current figure as a grid of numbers. TEXTCOLOUR and GRIDCOLOUR
+% control the colours of the numbers and grid lines respectively and
+% should follow the usual Matlab colour specification. FONTSIZE
+% should be an integer.
+%
+% See also
+% CONFFIG, DEMMLP2
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+[nrows, ncols] = size(matrix);
+
+% Write each entry at the centre of its unit grid cell. Row 1 of the
+% matrix appears at the top, so the y coordinate is flipped.
+for r = 1:nrows
+ for c = 1:ncols
+ label = num2str(matrix(r, c));
+ text(c - .5, nrows - r + .5, label, ...
+ 'HorizontalAlignment', 'center', ...
+ 'Color', textcolour, ...
+ 'FontWeight', 'bold', ...
+ 'FontSize', fontsize);
+ end
+end
+
+% Configure the axes as a square-celled grid with dotted lines and no
+% tick labels.
+set(gca, 'Box', 'on', ...
+ 'Visible', 'on', ...
+ 'xLim', [0 ncols], ...
+ 'xGrid', 'on', ...
+ 'xTickLabel', [], ...
+ 'xTick', 0:ncols, ...
+ 'yGrid', 'on', ...
+ 'yLim', [0 nrows], ...
+ 'yTickLabel', [], ...
+ 'yTick', 0:nrows, ...
+ 'DataAspectRatio', [1, 1, 1], ...
+ 'GridLineStyle', ':', ...
+ 'LineWidth', 3, ...
+ 'XColor', gridcolour, ...
+ 'YColor', gridcolour);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/ppca.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/ppca.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [var, U, lambda] = ppca(x, ppca_dim)
+%PPCA Probabilistic Principal Components Analysis
+%
+% Description
+% [VAR, U, LAMBDA] = PPCA(X, PPCA_DIM) computes the principal
+% component subspace U of dimension PPCA_DIM using a centred covariance
+% matrix X. The variable VAR contains the off-subspace variance (which
+% is assumed to be spherical), while the vector LAMBDA contains the
+% variances of each of the principal components. This is computed
+% using the eigenvalue and eigenvector decomposition of X.
+%
+% See also
+% EIGDEC, PCA
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+
+if ppca_dim ~= round(ppca_dim) | ppca_dim < 1 | ppca_dim > size(x, 2)
+ error('Number of PCs must be integer, >0, < dim');
+end
+
+[ndata, data_dim] = size(x);
+% Assumes that x is centred and responsibility weighted
+% covariance matrix
+[l Utemp] = eigdec(x, data_dim);
+% Zero any negative eigenvalues (caused by rounding)
+l(l<0) = 0;
+% Now compute the sigma squared values for all possible values
+% of q
+% (s2_temp(k) is the mean of the k smallest eigenvalues, i.e. the
+% off-subspace noise variance that would result from keeping
+% q = data_dim - k components)
+s2_temp = cumsum(l(end:-1:1))./[1:data_dim]';
+% If necessary, reduce the value of q so that var is at least
+% eps * largest eigenvalue
+q_temp = min([ppca_dim; data_dim-min(find(s2_temp/l(1) > eps))]);
+if q_temp ~= ppca_dim
+ wstringpart = 'Covariance matrix ill-conditioned: extracted';
+ wstring = sprintf('%s %d/%d PCs', ...
+ wstringpart, q_temp, ppca_dim);
+ warning(wstring);
+end
+if q_temp == 0
+ % All the latent dimensions have disappeared, so we are
+ % just left with the noise model
+ var = l(1)/data_dim;
+ lambda = var*ones(1, ppca_dim);
+else
+ % Spherical noise variance: mean of the discarded eigenvalues
+ var = mean(l(q_temp+1:end));
+end
+% NOTE(review): when q_temp < ppca_dim, U has only q_temp columns and
+% (unless q_temp == 0) lambda has only q_temp entries rather than
+% ppca_dim -- callers assuming full size should watch for the warning
+% above; confirm this is the intended contract.
+U = Utemp(:, 1:q_temp);
+lambda(1:q_temp) = l(1:q_temp);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/quasinew.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/quasinew.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,176 @@
+function [x, options, flog, pointlog] = quasinew(f, x, options, gradf, ...
+ varargin)
+%QUASINEW Quasi-Newton optimization.
+%
+% Description
+% [X, OPTIONS, FLOG, POINTLOG] = QUASINEW(F, X, OPTIONS, GRADF) uses a
+% quasi-Newton algorithm to find a local minimum of the function F(X)
+% whose gradient is given by GRADF(X). Here X is a row vector and F
+% returns a scalar value. The point at which F has a local minimum is
+% returned as X. The function value at that point is returned in
+% OPTIONS(8). A log of the function values after each cycle is
+% (optionally) returned in FLOG, and a log of the points visited is
+% (optionally) returned in POINTLOG.
+%
+% QUASINEW(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional
+% arguments to be passed to F() and GRADF().
+%
+% The optional parameters have the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument ERRLOG, and the points visited in the
+% return argument POINTSLOG. If OPTIONS(1) is set to 0, then only
+% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
+% displayed.
+%
+% OPTIONS(2) is a measure of the absolute precision required for the
+% value of X at the solution. If the absolute difference between the
+% values of X between two successive steps is less than OPTIONS(2),
+% then this condition is satisfied.
+%
+% OPTIONS(3) is a measure of the precision required of the objective
+% function at the solution. If the absolute difference between the
+% objective function values between two successive steps is less than
+% OPTIONS(3), then this condition is satisfied. Both this and the
+% previous condition must be satisfied for termination.
+%
+% OPTIONS(9) should be set to 1 to check the user defined gradient
+% function.
+%
+% OPTIONS(10) returns the total number of function evaluations
+% (including those in any line searches).
+%
+% OPTIONS(11) returns the total number of gradient evaluations.
+%
+% OPTIONS(14) is the maximum number of iterations; default 100.
+%
+% OPTIONS(15) is the precision in parameter space of the line search;
+% default 1E-2.
+%
+% See also
+% CONJGRAD, GRADDESC, LINEMIN, MINBRACK, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set up the options.
+if length(options) < 18
+ error('Options vector too short')
+end
+
+if(options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+% Set up options for line search
+line_options = foptions;
+% Don't need a very precise line search
+if options(15) > 0
+ line_options(2) = options(15);
+else
+ line_options(2) = 1e-2; % Default
+end
+% Minimal fractional change in f from Newton step: otherwise do a line search
+min_frac_change = 1e-4;
+
+display = options(1);
+
+% Next two lines allow quasinew to work with expression strings
+f = fcnchk(f, length(varargin));
+gradf = fcnchk(gradf, length(varargin));
+
+% Check gradients
+if (options(9))
+ feval('gradchek', x, f, gradf, varargin{:});
+end
+
+% Initial function value, gradient and search direction (steepest descent,
+% since the inverse Hessian starts as the identity).
+nparams = length(x);
+fnew = feval(f, x, varargin{:});
+options(10) = options(10) + 1;
+gradnew = feval(gradf, x, varargin{:});
+options(11) = options(11) + 1;
+p = -gradnew; % Search direction
+hessinv = eye(nparams); % Initialise inverse Hessian to be identity matrix
+j = 1;
+if nargout >= 3
+ flog(j, :) = fnew;
+ if nargout == 4
+ pointlog(j, :) = x;
+ end
+end
+
+while (j <= niters)
+
+ xold = x;
+ fold = fnew;
+ gradold = gradnew;
+
+ % Try the full quasi-Newton step first.
+ x = xold + p;
+ fnew = feval(f, x, varargin{:});
+ options(10) = options(10) + 1;
+
+ % This shouldn't occur, but rest of code depends on sd being downhill
+ % (note gradnew still holds the gradient at xold at this point; it is
+ % not recomputed until after the step is settled below)
+ if (gradnew*p' >= 0)
+ p = -p;
+ if options(1) >= 0
+ warning('search direction uphill in quasinew');
+ end
+ end
+
+ % Does the Newton step reduce the function value sufficiently?
+ if (fnew >= fold + min_frac_change * (gradnew*p'))
+ % No it doesn't
+ % Minimize along current search direction: must be less than Newton step
+ [lmin, line_options] = feval('linemin', f, xold, p, fold, ...
+ line_options, varargin{:});
+ options(10) = options(10) + line_options(10);
+ options(11) = options(11) + line_options(11);
+ % Correct x and fnew to be the actual search point we have found
+ x = xold + lmin * p;
+ p = x - xold;
+ fnew = line_options(8);
+ end
+
+ % Check for termination
+ if (max(abs(x - xold)) < options(2) & max(abs(fnew - fold)) < options(3))
+ options(8) = fnew;
+ return;
+ end
+ gradnew = feval(gradf, x, varargin{:});
+ options(11) = options(11) + 1;
+ v = gradnew - gradold;
+ vdotp = v*p';
+
+ % Skip update to inverse Hessian if fac not sufficiently positive
+ % (numerical safeguard: only update when the curvature term v.p is
+ % large enough relative to |v||p| for the update to be stable)
+ if (vdotp*vdotp > eps*sum(v.^2)*sum(p.^2))
+ Gv = (hessinv*v')';
+ vGv = sum(v.*Gv);
+ u = p./vdotp - Gv./vGv;
+ % Use BFGS update rule
+ hessinv = hessinv + (p'*p)/vdotp - (Gv'*Gv)/vGv + vGv*(u'*u);
+ end
+
+ % New search direction from the updated inverse Hessian.
+ p = -(hessinv * gradnew')';
+
+ if (display > 0)
+ fprintf(1, 'Cycle %4d Function %11.6f\n', j, fnew);
+ end
+
+ j = j + 1;
+ if nargout >= 3
+ flog(j, :) = fnew;
+ if nargout == 4
+ pointlog(j, :) = x;
+ end
+ end
+end
+
+% If we get here, then we haven't terminated in the given number of
+% iterations.
+
+options(8) = fold;
+if (options(1) >= 0)
+ % NOTE(review): maxitmess is a Netlab helper defined elsewhere in the
+ % toolbox (returns the standard 'maximum iterations' message).
+ disp(maxitmess);
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,123 @@
+function net = rbf(nin, nhidden, nout, rbfunc, outfunc, prior, beta)
+%RBF Creates an RBF network with specified architecture
+%
+% Description
+% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC) constructs and initialises a
+% radial basis function network returning a data structure NET. All
+% weights are drawn from a zero mean, unit variance normal
+% distribution, except the squared widths, which are set to one; the
+% random seed can be fixed with RANDN('STATE', S). The basis
+% functions are defined in terms of the distance between a data point
+% and the corresponding centre, and are computed up to a convenient
+% constant multiple (no normalisation is needed because the function
+% outputs are linearly combined in the next layer).
+%
+% The fields in NET are
+% type = 'rbf'
+% nin = number of inputs
+% nhidden = number of hidden units
+% nout = number of outputs
+% nwts = total number of weights and biases
+% actfn = string defining hidden unit activation function:
+% 'gaussian' for a radially symmetric Gaussian function.
+% 'tps' for r^2 log r, the thin plate spline function.
+% 'r4logr' for r^4 log r.
+% outfn = string defining output error function:
+% 'linear' for linear outputs (default) and SoS error.
+% 'neuroscale' for Sammon stress measure.
+% c = centres
+% wi = squared widths (null for rlogr and tps)
+% w2 = second layer weight matrix
+% b2 = second layer bias vector
+%
+% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC, OUTFUNC) also selects the
+% output error function stored in the field OUTFN: 'linear' outputs
+% (for regression problems) or 'neuroscale' outputs (for topographic
+% mappings) are supported.
+%
+% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC, OUTFUNC, PRIOR, BETA) sets a
+% Gaussian prior over the weights. A scalar PRIOR sets NET.ALPHA, the
+% inverse variance of a single zero-mean isotropic prior. PRIOR may
+% instead be a structure with fields ALPHA and INDEX, allowing
+% individual Gaussian priors over (possibly overlapping) groups of
+% weights: ALPHA is a column vector with one element per group, and
+% INDEX has one column per group and one 0/1 element per weight, in
+% the order defined by RBFPAK, marking group membership. The utility
+% function RBFPRIOR helps construct such a structure. BETA, if given,
+% sets the field NET.BETA, the inverse noise variance.
+%
+% See also
+% RBFERR, RBFFWD, RBFGRAD, RBFPAK, RBFTRAIN, RBFUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.type = 'rbf';
+net.nin = nin;
+net.nhidden = nhidden;
+net.nout = nout;
+
+% Validate the hidden unit activation function.
+actfns = {'gaussian', 'tps', 'r4logr'};
+if ~any(strcmp(rbfunc, actfns))
+ error('Undefined activation function.')
+end
+net.actfn = rbfunc;
+
+% Validate the output error function, defaulting to linear outputs.
+outfns = {'linear', 'neuroscale'};
+if nargin <= 4
+ net.outfn = outfns{1};
+else
+ if ~any(strcmp(outfunc, outfns))
+ error('Undefined output function.')
+ end
+ net.outfn = outfunc;
+end
+
+% Every basis function has a centre, and the hidden-to-output layer has a
+% bias per output. Only the Gaussian function has adjustable widths.
+net.nwts = nin*nhidden + (nhidden + 1)*nout;
+if strcmp(rbfunc, 'gaussian')
+ % Extra weights for width parameters
+ net.nwts = net.nwts + nhidden;
+end
+
+% Optional prior (scalar alpha or grouped alpha/index structure) and
+% inverse noise variance.
+if nargin > 5
+ if isstruct(prior)
+ net.alpha = prior.alpha;
+ net.index = prior.index;
+ elseif size(prior) == [1 1]
+ net.alpha = prior;
+ else
+ error('prior must be a scalar or a structure');
+ end
+ if nargin > 6
+ net.beta = beta;
+ end
+end
+
+% Initialise all parameters from N(0, 1) ...
+w = randn(1, net.nwts);
+net = rbfunpak(net, w);
+
+% ... except the squared widths, which start at one.
+if strcmp(rbfunc, 'gaussian')
+ net.wi = ones(1, nhidden);
+end
+
+% Neuroscale networks only adapt the output layer, so set up a mask.
+if strcmp(net.outfn, 'neuroscale')
+ net.mask = rbfprior(rbfunc, nin, nhidden, nout);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfbkp.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfbkp.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,72 @@
+function g = rbfbkp(net, x, z, n2, deltas)
+%RBFBKP Backpropagate gradient of error function for RBF network.
+%
+% Description
+% G = RBFBKP(NET, X, Z, N2, DELTAS) takes a network data structure NET
+% together with a matrix X of input vectors, a matrix Z of hidden unit
+% activations, a matrix N2 of the squared distances between centres and
+% inputs, and a matrix DELTAS of the gradient of the error function
+% with respect to the values of the output units (i.e. the summed
+% inputs to the output units, before the activation function is
+% applied). The return value is the gradient G of the error function
+% with respect to the network weights. Each row of X corresponds to one
+% input vector.
+%
+% This function is provided so that the common backpropagation
+% algorithm can be used by RBF network models to compute gradients for
+% the output values (in RBFDERIV) as well as standard error functions.
+%
+% See also
+% RBF, RBFGRAD, RBFDERIV
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Evaluate second-layer gradients.
+gw2 = z'*deltas;
+gb2 = sum(deltas);
+
+% Evaluate hidden unit gradients
+delhid = deltas*net.w2';
+
+% gc accumulates the gradient with respect to the centres; t1 and t2 are
+% reusable vectors of ones for outer-product broadcasting.
+gc = zeros(net.nhidden, net.nin);
+ndata = size(x, 1);
+t1 = ones(ndata, 1);
+t2 = ones(1, net.nin);
+% Switch on activation function type
+switch net.actfn
+
+case 'gaussian' % Gaussian
+ % Chain through dz/dn2 = -z/(2*wi) folded with dn2/dc = -2(x - c).
+ delhid = (delhid.*z);
+ % A loop seems essential, so do it with the shortest index vector
+ if (net.nin < net.nhidden)
+ for i = 1:net.nin
+ gc(:,i) = (sum(((x(:,i)*ones(1, net.nhidden)) - ...
+ (ones(ndata, 1)*(net.c(:,i)'))).*delhid, 1)./net.wi)';
+ end
+ else
+ % NOTE(review): here ./net.wi(i) binds only to the centre term
+ % (t1*net.c(i,:)), whereas the branch above divides the whole
+ % (x - c) difference by net.wi. One of the two parenthesisations
+ % looks wrong; confirm against the intended centre gradient
+ % before relying on this path.
+ for i = 1:net.nhidden
+ gc(i,:) = sum((x - (t1*(net.c(i,:)))./net.wi(i)).*(delhid(:,i)*t2), 1);
+ end
+ end
+ % Gradient with respect to the squared widths.
+ gwi = sum((n2.*delhid)./(2.*(ones(ndata, 1)*(net.wi.^2))), 1);
+
+case 'tps' % Thin plate spline activation function
+ % d(n2 log n2)/dn2 = 1 + log n2; the (n2==0) guard avoids log(0).
+ delhid = delhid.*(1+log(n2+(n2==0)));
+ for i = 1:net.nhidden
+ gc(i,:) = sum(2.*((t1*(net.c(i,:)) - x)).*(delhid(:,i)*t2), 1);
+ end
+ % widths are not adjustable in this model
+ gwi = [];
+case 'r4logr' % r^4 log r activation function
+ % d(n2^2 log n2)/dn2 = n2 (1 + 2 log n2), with the same log(0) guard.
+ delhid = delhid.*(n2.*(1+2.*log(n2+(n2==0))));
+ for i = 1:net.nhidden
+ gc(i,:) = sum(2.*((t1*(net.c(i,:)) - x)).*(delhid(:,i)*t2), 1);
+ end
+ % widths are not adjustable in this model
+ gwi = [];
+otherwise
+ error('Unknown activation function in rbfgrad')
+end
+
+% Pack the gradient in the same order as rbfpak: centres, widths,
+% second-layer weights, biases.
+g = [gc(:)', gwi, gw2(:)', gb2];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfderiv.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfderiv.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,53 @@
+function g = rbfderiv(net, x)
+%RBFDERIV Evaluate derivatives of RBF network outputs with respect to weights.
+%
+% Description
+% G = RBFDERIV(NET, X) takes a network data structure NET and a matrix
+% of input vectors X and returns a three-index matrix G whose I, J, K
+% element contains the derivative of network output K with respect to
+% weight or bias parameter J for input pattern I. The ordering of the
+% weight and bias parameters is defined by RBFUNPAK. This function
+% also takes into account any mask in the network data structure.
+%
+% See also
+% RBF, RBFPAK, RBFGRAD, RBFBKP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'rbf', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if ~strcmp(net.outfn, 'linear')
+ error('Function only implemented for linear outputs')
+end
+
+% One forward pass supplies the hidden activations z and squared
+% distances n2 required by rbfbkp for every pattern.
+[y, z, n2] = rbffwd(net, x);
+ndata = size(x, 1);
+
+% With a mask, only the selected subset of weights appears in g.
+% (The dead initialisation "temp = zeros(1, net.nwts)" in the original
+% was removed: temp is always overwritten by rbfbkp before use.)
+if isfield(net, 'mask')
+ nwts = size(find(net.mask), 1);
+else
+ nwts = net.nwts;
+end
+
+g = zeros(ndata, nwts, net.nout);
+for k = 1 : net.nout
+ % Backpropagating a unit vector at output k yields dy_k/dw.
+ delta = zeros(1, net.nout);
+ delta(1, k) = 1;
+ for n = 1 : ndata
+ if isfield(net, 'mask')
+ temp = rbfbkp(net, x(n, :), z(n, :), n2(n, :), delta);
+ g(n, :, k) = temp(logical(net.mask));
+ else
+ g(n, :, k) = rbfbkp(net, x(n, :), z(n, :), n2(n, :),...
+ delta);
+ end
+ end
+end
+
+
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbferr.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbferr.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,50 @@
+function [e, edata, eprior] = rbferr(net, x, t)
+%RBFERR Evaluate error function for RBF network.
+%
+% Description
+% E = RBFERR(NET, X, T) evaluates the error function E for the RBF
+% network NET given inputs X and targets T. Which error is computed
+% depends on NET.OUTFN: sum-of-squares for 'linear' outputs, or the
+% Sammon stress measure for 'neuroscale', where T is interpreted as a
+% matrix of target inter-point distances. Each row of X corresponds
+% to one input vector and each row of T to the corresponding target.
+%
+% [E, EDATA, EPRIOR] = RBFERR(NET, X, T) additionally returns the data
+% and prior components of the error, assuming a zero mean Gaussian
+% prior on the weights with inverse variance parameters ALPHA and BETA
+% taken from the network data structure NET.
+%
+% See also
+% RBF, RBFFWD, RBFGRAD, RBFPAK, RBFTRAIN, RBFUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency. For 'neuroscale' the target matrix
+% is not tied to the network output dimension, so t is not checked.
+if strcmp(net.outfn, 'linear')
+ errstring = consist(net, 'rbf', x, t);
+elseif strcmp(net.outfn, 'neuroscale')
+ errstring = consist(net, 'rbf', x);
+else
+ error(['Unknown output function ', net.outfn]);
+end
+if ~isempty(errstring);
+ error(errstring);
+end
+
+% Data-dependent part of the error.
+if strcmp(net.outfn, 'linear')
+ y = rbffwd(net, x);
+ edata = 0.5*sum(sum((y - t).^2));
+elseif strcmp(net.outfn, 'neuroscale')
+ y = rbffwd(net, x);
+ % Stress: squared mismatch between target and actual inter-point
+ % distances in the projected space.
+ y_dist = sqrt(dist2(y, y));
+ edata = 0.5.*(sum(sum((t-y_dist).^2)));
+else
+ error(['Unknown output function ', net.outfn]);
+end
+
+% Fold in the Bayesian regularisation term.
+[e, edata, eprior] = errbayes(net, edata);
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfevfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfevfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [y, extra, invhess] = rbfevfwd(net, x, t, x_test, invhess)
+%RBFEVFWD Forward propagation with evidence for RBF
+%
+% Description
+% Y = RBFEVFWD(NET, X, T, X_TEST) forward propagates the test inputs
+% X_TEST through the RBF network NET to give Y, and also returns the
+% matrix EXTRA: error bars (variance) for a regression problem, or
+% moderated outputs for a classification problem, computed via the
+% evidence framework from the training inputs X and targets T.
+%
+% The optional argument (and return value) INVHESS is the inverse of
+% the network Hessian computed on the training data inputs and targets.
+% Passing it in avoids recomputing it, which can be a significant
+% saving for large training sets.
+%
+% See also
+% FEVBAYES
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+y = rbffwd(net, x_test);
+% RBF outputs must be linear, so the output-unit activations coincide
+% with the outputs: pass y for both arguments (the second copy is not
+% used).
+if nargin == 4
+ [extra, invhess] = fevbayes(net, y, y, x, t, x_test);
+else
+ [extra, invhess] = fevbayes(net, y, y, x, t, x_test, invhess);
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbffwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbffwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function [a, z, n2] = rbffwd(net, x)
+%RBFFWD Forward propagation through RBF network with linear outputs.
+%
+% Description
+% A = RBFFWD(NET, X) takes a network data structure NET and a matrix X
+% of input vectors and forward propagates the inputs through the
+% network to generate a matrix A of output vectors. Each row of X
+% corresponds to one input vector and each row of A contains the
+% corresponding output vector. The activation function that is used is
+% determined by NET.ACTFN.
+%
+% [A, Z, N2] = RBFFWD(NET, X) also generates a matrix Z of the hidden
+% unit activations where each row corresponds to one pattern. These
+% hidden unit activations represent the design matrix for the RBF. The
+% matrix N2 is the squared distances between each basis function centre
+% and each pattern in which each row corresponds to a data point.
+%
+% See also
+% RBF, RBFERR, RBFGRAD, RBFPAK, RBFTRAIN, RBFUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'rbf', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+[ndata, data_dim] = size(x);
+
+% Squared distance from every pattern to every centre: (ndata, ncentres).
+n2 = dist2(x, net.c);
+
+% Hidden unit activations. The n2==0 guard keeps log(0) out of the
+% spline kernels (both r^2 log r and r^4 log r are zero at r = 0).
+if strcmp(net.actfn, 'gaussian')
+ % net.wi holds squared widths
+ wi2 = ones(ndata, 1) * (2 .* net.wi);
+ z = exp(-(n2./wi2));
+elseif strcmp(net.actfn, 'tps')
+ z = n2.*log(n2+(n2==0));
+elseif strcmp(net.actfn, 'r4logr')
+ z = n2.*n2.*log(n2+(n2==0));
+else
+ error('Unknown activation function in rbffwd')
+end
+
+% Linear output layer.
+a = z*net.w2 + ones(ndata, 1)*net.b2;
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfgrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfgrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function [g, gdata, gprior] = rbfgrad(net, x, t)
+%RBFGRAD Evaluate gradient of error function for RBF network.
+%
+% Description
+% G = RBFGRAD(NET, X, T) takes a network data structure NET together
+% with a matrix X of input vectors and a matrix T of target vectors,
+% and evaluates the gradient G of the error function with respect to
+% the network weights (i.e. including the hidden unit parameters). The
+% error function is sum of squares. Each row of X corresponds to one
+% input vector and each row of T contains the corresponding target
+% vector. If the output function is 'NEUROSCALE' then the gradient is
+% only computed for the output layer weights and biases.
+%
+% [G, GDATA, GPRIOR] = RBFGRAD(NET, X, T) also returns separately the
+% data and prior contributions to the gradient. In the case of multiple
+% groups in the prior, GPRIOR is a matrix with a row for each group and
+% a column for each weight parameter.
+%
+% See also
+% RBF, RBFFWD, RBFERR, RBFPAK, RBFUNPAK, RBFBKP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+switch net.outfn
+case 'linear'
+ errstring = consist(net, 'rbf', x, t);
+case 'neuroscale'
+ errstring = consist(net, 'rbf', x);
+otherwise
+ error(['Unknown output function ', net.outfn]);
+end
+if ~isempty(errstring);
+ error(errstring);
+end
+
+ndata = size(x, 1);
+
+% Forward pass supplies hidden activations z and squared distances n2,
+% both needed by the backpropagation routine rbfbkp.
+[y, z, n2] = rbffwd(net, x);
+
+switch net.outfn
+case 'linear'
+
+ % Sum squared error at output units
+ delout = y - t;
+
+ gdata = rbfbkp(net, x, z, n2, delout);
+ [g, gdata, gprior] = gbayes(net, gdata);
+
+case 'neuroscale'
+ % Compute the error gradient with respect to outputs
+ % t holds the target inter-point distances. Adding the identity to
+ % y_dist stops the diagonal (self-distance) terms dividing by zero;
+ % the corresponding numerators on the diagonal are zero anyway.
+ y_dist = sqrt(dist2(y, y));
+ D = (t - y_dist)./(y_dist+diag(ones(ndata, 1)));
+ temp = y';
+ % Form all pairwise output differences y_i - y_j, weight each by the
+ % matching entry of D, and sum over j to obtain the stress gradient
+ % for every output coordinate of every pattern.
+ gradient = 2.*sum(kron(D, ones(1, net.nout)) .* ...
+ (repmat(y, 1, ndata) - repmat((temp(:))', ndata, 1)), 1);
+ gradient = (reshape(gradient, net.nout, ndata))';
+ % Compute the error gradient
+ gdata = rbfbkp(net, x, z, n2, gradient);
+ [g, gdata, gprior] = gbayes(net, gdata);
+otherwise
+ error(['Unknown output function ', net.outfn]);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfhess.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfhess.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,91 @@
+function [h, hdata] = rbfhess(net, x, t, hdata)
+%RBFHESS Evaluate the Hessian matrix for RBF network.
+%
+% Description
+% H = RBFHESS(NET, X, T) takes an RBF network data structure NET, a
+% matrix X of input values, and a matrix T of target values and returns
+% the full Hessian matrix H corresponding to the second derivatives of
+% the negative log posterior distribution, evaluated for the current
+% weight and bias values as defined by NET. Currently, the
+% implementation only computes the Hessian for the output layer
+% weights.
+%
+% [H, HDATA] = RBFHESS(NET, X, T) returns both the Hessian matrix H and
+% the contribution HDATA arising from the data dependent term in the
+% Hessian.
+%
+% H = RBFHESS(NET, X, T, HDATA) takes a network data structure NET, a
+% matrix X of input values, and a matrix T of target values, together
+% with the contribution HDATA arising from the data dependent term in
+% the Hessian, and returns the full Hessian matrix H corresponding to
+% the second derivatives of the negative log posterior distribution.
+% This version saves computation time if HDATA has already been
+% evaluated for the current weight and bias values.
+%
+% See also
+% MLPHESS, HESSCHEK, EVIDENCE
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'rbf', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if nargin == 3
+ % Data term in Hessian needs to be computed
+ [a, z] = rbffwd(net, x);
+ hdata = datahess(net, z, t);
+end
+
+% Add in effect of regularisation
+[h, hdata] = hbayes(net, hdata);
+
+% Sub-function to compute data part of Hessian
+function hdata = datahess(net, z, t)
+
+% Only works for output layer Hessian currently
+% (requires a mask that zeroes out every non-output-layer weight, as
+% produced by rbfprior for neuroscale networks)
+if (isfield(net, 'mask') & ~any(net.mask(...
+ 1:(net.nwts - net.nout*(net.nhidden+1)))))
+ hdata = zeros(net.nwts);
+ ndata = size(z, 1);
+ % Gram matrix of the design matrix [z 1]; for linear outputs this is
+ % the per-output block of second derivatives.
+ out_hess = [z ones(ndata, 1)]'*[z ones(ndata, 1)];
+ for j = 1:net.nout
+ hdata = rearrange_hess(net, j, out_hess, hdata);
+ end
+else
+ error('Output layer Hessian only.');
+end
+return
+
+% Sub-function to rearrange Hessian matrix
+function hdata = rearrange_hess(net, j, out_hess, hdata)
+
+% Because all the biases come after all the input weights,
+% we have to rearrange the blocks that make up the network Hessian.
+% This function assumes that we are on the jth output and that all outputs
+% are independent.
+
+% Start of bias weights block
+bb_start = net.nwts - net.nout + 1;
+% Start of weight block for jth output
+ob_start = net.nwts - net.nout*(net.nhidden+1) + (j-1)*net.nhidden...
+ + 1;
+% End of weight block for jth output
+ob_end = ob_start + net.nhidden - 1;
+% Index of bias weight
+b_index = bb_start+(j-1);
+% Put input weight block in right place
+hdata(ob_start:ob_end, ob_start:ob_end) = out_hess(1:net.nhidden, ...
+ 1:net.nhidden);
+% Put second derivative of bias weight in right place
+hdata(b_index, b_index) = out_hess(net.nhidden+1, net.nhidden+1);
+% Put cross terms (input weight v bias weight) in right place
+hdata(b_index, ob_start:ob_end) = out_hess(net.nhidden+1, ...
+ 1:net.nhidden);
+hdata(ob_start:ob_end, b_index) = out_hess(1:net.nhidden, ...
+ net.nhidden+1);
+
+return
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfjacob.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfjacob.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,49 @@
+function jac = rbfjacob(net, x)
+%RBFJACOB Evaluate derivatives of RBF network outputs with respect to inputs.
+%
+% Description
+% G = RBFJACOB(NET, X) takes a network data structure NET and a matrix
+% of input vectors X and returns a three-index matrix G whose I, J, K
+% element contains the derivative of network output K with respect to
+% input parameter J for input pattern I.
+%
+% See also
+% RBF, RBFGRAD, RBFBKP
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'rbf', x);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+if ~strcmp(net.outfn, 'linear')
+ error('Function only implemented for linear outputs')
+end
+
+[y, z, n2] = rbffwd(net, x);
+
+ndata = size(x, 1);
+jac = zeros(ndata, net.nin, net.nout);
+Psi = zeros(net.nin, net.nhidden);
+% Derivative of each hidden activation with respect to its squared
+% distance n2; the n2==0 guard keeps log(0) out of the spline kernels.
+if strcmp(net.actfn, 'gaussian')
+ dz = -z./(ones(ndata, 1)*net.wi);
+elseif strcmp(net.actfn, 'tps')
+ dz = 2*(1 + log(n2+(n2==0)));
+elseif strcmp(net.actfn, 'r4logr')
+ dz = 2*(n2.*(1+2.*log(n2+(n2==0))));
+else
+ error(['Unknown activation function ', net.actfn]);
+end
+
+% Chain rule per pattern: dz/dx = dz/dn2 .* (x - c), then combine with
+% the output weights. Biases are ignored as they cannot affect the
+% Jacobian.
+for n = 1:ndata
+ Psi = (ones(net.nin, 1)*dz(n, :)).* ...
+ (x(n, :)'*ones(1, net.nhidden) - net.c');
+ jac(n, :, :) = Psi * net.w2;
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,19 @@
+function w = rbfpak(net)
+%RBFPAK Combines all the parameters in an RBF network into one weights vector.
+%
+% Description
+% W = RBFPAK(NET) takes a network data structure NET and combines the
+% component parameter matrices into a single row vector W, in the
+% order: centres, squared widths (if any), second-layer weights,
+% second-layer biases.
+%
+% See also
+% RBFUNPAK, RBF
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency. BUG FIX: the original tested
+% "if ~errstring", which can never raise the error -- negating a
+% non-empty char array yields an all-zero logical array -- so an
+% inconsistent network was silently packed. Every other routine in
+% this toolbox uses isempty for this check.
+errstring = consist(net, 'rbf');
+if ~isempty(errstring)
+ error(errstring);
+end
+
+w = [net.c(:)', net.wi, net.w2(:)', net.b2];
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfprior.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfprior.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,54 @@
+function [mask, prior] = rbfprior(rbfunc, nin, nhidden, nout, aw2, ab2)
+%RBFPRIOR Create Gaussian prior and output layer mask for RBF.
+%
+% Description
+% [MASK, PRIOR] = RBFPRIOR(RBFUNC, NIN, NHIDDEN, NOUT, AW2, AB2)
+% generates a vector MASK that selects only the output layer weights.
+% This is because most uses of RBF networks in a Bayesian context have
+% fixed basis functions with the output layer as the only adjustable
+% parameters. In particular, the Neuroscale output error function is
+% designed to work only with this mask.
+%
+% The return value PRIOR is a data structure, with fields PRIOR.ALPHA
+% and PRIOR.INDEX, which specifies a Gaussian prior distribution for
+% the network weights in an RBF network. The parameters AW2 and AB2 are
+% all scalars and represent the regularization coefficients for two
+% groups of parameters in the network corresponding to second-layer
+% weights, and second-layer biases respectively. Then PRIOR.ALPHA
+% represents a column vector of length 2 containing the parameters, and
+% PRIOR.INDEX is a matrix specifying which weights belong in each
+% group. Each column has one element for each weight in the matrix,
+% using the standard ordering as defined in RBFPAK, and each element is
+% 1 or 0 according to whether the weight is a member of the
+% corresponding group or not.
+%
+% See also
+% RBF, RBFERR, RBFGRAD, EVIDENCE
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+nwts_layer2 = nout + (nhidden *nout);
+switch rbfunc
+case 'gaussian'
+ nwts_layer1 = nin*nhidden + nhidden;
+case {'tps', 'r4logr'}
+ nwts_layer1 = nin*nhidden;
+otherwise
+ error('Undefined activation function');
+end
+nwts = nwts_layer1 + nwts_layer2;
+
+% Make a mask only for output layer
+mask = [zeros(nwts_layer1, 1); ones(nwts_layer2, 1)];
+
+if nargout > 1
+ % Construct prior
+ indx = zeros(nwts, 2);
+ mark2 = nwts_layer1 + (nhidden * nout);
+ indx(nwts_layer1 + 1:mark2, 1) = ones(nhidden * nout, 1);
+ indx(mark2 + 1:nwts, 2) = ones(nout, 1);
+
+ prior.index = indx;
+ prior.alpha = [aw2, ab2]';
+end
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfsetbf.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfsetbf.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,39 @@
+function net = rbfsetbf(net, options, x)
+%RBFSETBF Set basis functions of RBF from data.
+%
+% Description
+% NET = RBFSETBF(NET, OPTIONS, X) sets the basis functions of the RBF
+% network NET so that they model the unconditional density of the
+% dataset X. This is done by training a GMM with spherical covariances
+% using GMMEM. The OPTIONS vector is passed to GMMEM. The widths of
+% the functions are set by a call to RBFSETFW.
+%
+% See also
+% RBFTRAIN, RBFSETFW, GMMEM
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'rbf', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Create a spherical Gaussian mixture model
+mix = gmm(net.nin, net.nhidden, 'spherical');
+
+% Initialise the parameters from the input data
+% Just use a small number of k means iterations
+kmoptions = zeros(1, 18);
+kmoptions(1) = -1; % Turn off warnings
+kmoptions(14) = 5; % Just 5 iterations to get centres roughly right
+mix = gmminit(mix, x, kmoptions);
+
+% Train mixture model using EM algorithm
+[mix, options] = gmmem(mix, x, options);
+
+% Now set the centres of the RBF from the centres of the mixture model
+net.c = mix.centres;
+
+% options(7) gives scale of function widths
+net = rbfsetfw(net, options(7));
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfsetfw.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfsetfw.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function net = rbfsetfw(net, scale)
+%RBFSETFW Set basis function widths of RBF.
+%
+% Description
+% NET = RBFSETFW(NET, SCALE) sets the widths of the basis functions of
+% the RBF network NET. If Gaussian basis functions are used, then the
+% variances are set to the largest squared distance between centres if
+% SCALE is non-positive and SCALE times the mean distance of each
+% centre to its nearest neighbour if SCALE is positive. Non-Gaussian
+% basis functions do not have a width.
+%
+% See also
+% RBFTRAIN, RBFSETBF, GMMEM
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set the variances to be the largest squared distance between centres
+if strcmp(net.actfn, 'gaussian')
+ cdist = dist2(net.c, net.c);
+ if scale > 0.0
+ % Set variance of basis to be scale times average
+ % distance to nearest neighbour
+ cdist = cdist + realmax*eye(net.nhidden);
+ widths = scale*mean(min(cdist));
+ else
+ widths = max(max(cdist));
+ end
+ net.wi = widths * ones(size(net.wi));
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbftrain.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbftrain.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,203 @@
+function [net, options] = rbftrain(net, options, x, t)
+%RBFTRAIN Two stage training of RBF network.
+%
+% Description
+% NET = RBFTRAIN(NET, OPTIONS, X, T) uses a two stage training
+% algorithm to set the weights in the RBF model structure NET. Each row
+% of X corresponds to one input vector and each row of T contains the
+% corresponding target vector. The centres are determined by fitting a
+% Gaussian mixture model with circular covariances using the EM
+% algorithm through a call to RBFSETBF. (The mixture model is
+% initialised using a small number of iterations of the K-means
+% algorithm.) If the activation functions are Gaussians, then the basis
+% function widths are then set to the maximum inter-centre squared
+% distance.
+%
+% For linear outputs, the hidden to output weights that give rise to
+% the least squares solution can then be determined using the pseudo-
+% inverse. For neuroscale outputs, the hidden to output weights are
+% determined using the iterative shadow targets algorithm. Although
+% this two stage procedure may not give solutions with as low an error
+% as using general purpose non-linear optimisers, it is much faster.
+%
+% The options vector may have two rows: if this is the case, then the
+% second row is passed to RBFSETBF, which allows the user to specify a
+% different number iterations for RBF and GMM training. The optional
+% parameters to RBFTRAIN have the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values during EM training.
+%
+% OPTIONS(2) is a measure of the precision required for the value of
+% the weights W at the solution.
+%
+% OPTIONS(3) is a measure of the precision required of the objective
+% function at the solution. Both this and the previous condition must
+% be satisfied for termination.
+%
+% OPTIONS(5) is set to 1 if the basis functions parameters should
+% remain unchanged; default 0.
+%
+% OPTIONS(6) is set to 1 if the output layer weights should be should
+% set using PCA. This is only relevant for Neuroscale outputs; default
+% 0.
+%
+% OPTIONS(14) is the maximum number of iterations for the shadow
+% targets algorithm; default 100.
+%
+% See also
+% RBF, RBFERR, RBFFWD, RBFGRAD, RBFPAK, RBFUNPAK, RBFSETBF
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+switch net.outfn
+case 'linear'
+ errstring = consist(net, 'rbf', x, t);
+case 'neuroscale'
+ errstring = consist(net, 'rbf', x);
+otherwise
+ error(['Unknown output function ', net.outfn]);
+end
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Allow options to have two rows: if this is the case, then the second row
+% is passed to rbfsetbf
+if size(options, 1) == 2
+ setbfoptions = options(2, :);
+ options = options(1, :);
+else
+ setbfoptions = options;
+end
+
+if(~options(14))
+ options(14) = 100;
+end
+% Do we need to test for termination?
+test = (options(2) | options(3));
+
+% Set up the basis function parameters to model the input data density
+% unless options(5) is set.
+if ~(logical(options(5)))
+ net = rbfsetbf(net, setbfoptions, x);
+end
+
+% Compute the design (or activations) matrix
+[y, act] = rbffwd(net, x);
+ndata = size(x, 1);
+
+if strcmp(net.outfn, 'neuroscale') & options(6)
+ % Initialise output layer weights by projecting data with PCA
+ mu = mean(x);
+ [pcvals, pcvecs] = pca(x, net.nout);
+ xproj = (x - ones(ndata, 1)*mu)*pcvecs;
+ % Now use projected data as targets to compute output layer weights
+ temp = pinv([act ones(ndata, 1)]) * xproj;
+ net.w2 = temp(1:net.nhidden, :);
+ net.b2 = temp(net.nhidden+1, :);
+ % Propagate again to compute revised outputs
+ [y, act] = rbffwd(net, x);
+end
+
+switch net.outfn
+case 'linear'
+ % Sum of squares error function in regression model
+ % Solve for the weights and biases using pseudo-inverse from activations
+ Phi = [act ones(ndata, 1)];
+ if ~isfield(net, 'alpha')
+ % Solve for the weights and biases using left matrix divide
+ temp = pinv(Phi)*t;
+ elseif all(size(net.alpha) == [1 1])
+ % Use normal form equation
+ hessian = Phi'*Phi + net.alpha*eye(net.nhidden+1);
+ temp = pinv(hessian)*(Phi'*t);
+ else
+ error('Only scalar alpha allowed');
+ end
+ net.w2 = temp(1:net.nhidden, :);
+ net.b2 = temp(net.nhidden+1, :);
+
+case 'neuroscale'
+ % Use the shadow targets training algorithm
+ if nargin < 4
+ % If optional input distances not passed in, then use
+ % Euclidean distance
+ x_dist = sqrt(dist2(x, x));
+ else
+ x_dist = t;
+ end
+ Phi = [act, ones(ndata, 1)];
+ % Compute the pseudo-inverse of Phi
+ PhiDag = pinv(Phi);
+ % Compute y_dist, distances between image points
+ y_dist = sqrt(dist2(y, y));
+
+ % Save old weights so that we can check the termination criterion
+ wold = netpak(net);
+ % Compute initial error (stress) value
+ errold = 0.5*(sum(sum((x_dist - y_dist).^2)));
+
+ % Initial value for eta
+ eta = 0.1;
+ k_up = 1.2;
+ k_down = 0.1;
+ success = 1; % Force initial gradient calculation
+
+ for j = 1:options(14)
+ if success
+ % Compute the negative error gradient with respect to network outputs
+ D = (x_dist - y_dist)./(y_dist+(y_dist==0));
+ temp = y';
+ neg_gradient = -2.*sum(kron(D, ones(1, net.nout)) .* ...
+ (repmat(y, 1, ndata) - repmat((temp(:))', ndata, 1)), 1);
+ neg_gradient = (reshape(neg_gradient, net.nout, ndata))';
+ end
+ % Compute the shadow targets
+ t = y + eta*neg_gradient;
+ % Solve for the weights and biases
+ temp = PhiDag * t;
+ net.w2 = temp(1:net.nhidden, :);
+ net.b2 = temp(net.nhidden+1, :);
+
+ % Do housekeeping and test for convergence
+ ynew = rbffwd(net, x);
+ y_distnew = sqrt(dist2(ynew, ynew));
+ err = 0.5.*(sum(sum((x_dist-y_distnew).^2)));
+ if err > errold
+ success = 0;
+ % Restore previous weights
+ net = netunpak(net, wold);
+ err = errold;
+ eta = eta * k_down;
+ else
+ success = 1;
+ eta = eta * k_up;
+ errold = err;
+ y = ynew;
+ y_dist = y_distnew;
+ if test & j > 1
+ w = netpak(net);
+ if (max(abs(w - wold)) < options(2) & abs(err-errold) < options(3))
+ options(8) = err;
+ return;
+ end
+ end
+ wold = netpak(net);
+ end
+ if options(1)
+ fprintf(1, 'Cycle %4d Error %11.6f\n', j, err)
+ end
+ if nargout >= 3
+ errlog(j) = err;
+ end
+ end
+ options(8) = errold;
+ if (options(1) >= 0)
+ disp('Warning: Maximum number of iterations has been exceeded');
+ end
+otherwise
+ error(['Unknown output function ', net.outfn]);
+
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rbfunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rbfunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,43 @@
+function net = rbfunpak(net, w)
+%RBFUNPAK Separates a vector of RBF weights into its components.
+%
+% Description
+% NET = RBFUNPAK(NET, W) takes an RBF network data structure NET and a
+% weight vector W, and returns a network data structure identical to
+% the input network, except that the centres C, the widths WI, the
+% second-layer weight matrix W2 and the second-layer bias vector B2
+% have all been set to the corresponding elements of W.
+%
+% See also
+% RBFPAK, RBF
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'rbf');
+if ~isempty(errstring)
+ error(errstring);
+end
+
+if net.nwts ~= length(w)
+ error('Invalid length of weight vector')
+end
+
+nin = net.nin;
+nhidden = net.nhidden;
+nout = net.nout;
+
+mark1 = nin*nhidden;
+net.c = reshape(w(1:mark1), nhidden, nin);
+if strcmp(net.actfn, 'gaussian')
+ mark2 = mark1 + nhidden;
+ net.wi = reshape(w(mark1+1:mark2), 1, nhidden);
+else
+ mark2 = mark1;
+ net.wi = [];
+end
+mark3 = mark2 + nhidden*nout;
+net.w2 = reshape(w(mark2+1:mark3), nhidden, nout);
+mark4 = mark3 + nout;
+net.b2 = reshape(w(mark3+1:mark4), 1, nout);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rosegrad.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rosegrad.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,20 @@
+function g = rosegrad(x)
+%ROSEGRAD Calculate gradient of Rosenbrock's function.
+%
+% Description
+% G = ROSEGRAD(X) computes the gradient of Rosenbrock's function at
+% each row of X, which should have two columns.
+%
+% See also
+% DEMOPT1, ROSEN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Return gradient of Rosenbrock's test function
+
+nrows = size(x, 1);
+g = zeros(nrows,2);
+
+g(:,1) = -400 * (x(:,2) - x(:,1).^2) .* x(:,1) - 2 * (1 - x(:,1));
+g(:,2) = 200 * (x(:,2) - x(:,1).^2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/rosen.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/rosen.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,16 @@
+function y = rosen(x)
+%ROSEN Calculate Rosenbrock's function.
+%
+% Description
+% Y = ROSEN(X) computes the value of Rosenbrock's function at each row
+% of X, which should have two columns.
+%
+% See also
+% DEMOPT1, ROSEGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Calculate value of Rosenbrock's function: x should be nrows by 2 columns
+
+y = 100 * ((x(:,2) - x(:,1).^2).^2) + (1.0 - x(:,1)).^2;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/scg.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/scg.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,208 @@
+function [x, options, flog, pointlog, scalelog] = scg(f, x, options, gradf, varargin)
+%SCG Scaled conjugate gradient optimization.
+%
+% Description
+% [X, OPTIONS] = SCG(F, X, OPTIONS, GRADF) uses a scaled conjugate
+% gradients algorithm to find a local minimum of the function F(X)
+% whose gradient is given by GRADF(X). Here X is a row vector and F
+% returns a scalar value. The point at which F has a local minimum is
+% returned as X. The function value at that point is returned in
+% OPTIONS(8).
+%
+% [X, OPTIONS, FLOG, POINTLOG, SCALELOG] = SCG(F, X, OPTIONS, GRADF)
+% also returns (optionally) a log of the function values after each
+% cycle in FLOG, a log of the points visited in POINTLOG, and a log of
+% the scale values in the algorithm in SCALELOG.
+%
+% SCG(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional arguments to
+% be passed to F() and GRADF(). The optional parameters have the
+% following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs error
+% values in the return argument ERRLOG, and the points visited in the
+% return argument POINTSLOG. If OPTIONS(1) is set to 0, then only
+% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
+% displayed.
+%
+% OPTIONS(2) is a measure of the absolute precision required for the
+% value of X at the solution. If the absolute difference between the
+% values of X between two successive steps is less than OPTIONS(2),
+% then this condition is satisfied.
+%
+% OPTIONS(3) is a measure of the precision required of the objective
+% function at the solution. If the absolute difference between the
+% objective function values between two successive steps is less than
+% OPTIONS(3), then this condition is satisfied. Both this and the
+% previous condition must be satisfied for termination.
+%
+% OPTIONS(9) is set to 1 to check the user defined gradient function.
+%
+% OPTIONS(10) returns the total number of function evaluations
+% (including those in any line searches).
+%
+% OPTIONS(11) returns the total number of gradient evaluations.
+%
+% OPTIONS(14) is the maximum number of iterations; default 100.
+%
+% See also
+% CONJGRAD, QUASINEW
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Set up the options.
+if length(options) < 18
+ error('Options vector too short')
+end
+
+if(options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+display = options(1);
+gradcheck = options(9);
+
+% Set up strings for evaluating function and gradient
+f = fcnchk(f, length(varargin));
+gradf = fcnchk(gradf, length(varargin));
+
+nparams = length(x);
+
+% Check gradients
+if (gradcheck)
+ feval('gradchek', x, f, gradf, varargin{:});
+end
+
+sigma0 = 1.0e-4;
+fold = feval(f, x, varargin{:}); % Initial function value.
+fnow = fold;
+options(10) = options(10) + 1; % Increment function evaluation counter.
+gradnew = feval(gradf, x, varargin{:}); % Initial gradient.
+gradold = gradnew;
+options(11) = options(11) + 1; % Increment gradient evaluation counter.
+d = -gradnew; % Initial search direction.
+success = 1; % Force calculation of directional derivs.
+nsuccess = 0; % nsuccess counts number of successes.
+beta = 1.0; % Initial scale parameter.
+betamin = 1.0e-15; % Lower bound on scale.
+betamax = 1.0e100; % Upper bound on scale.
+j = 1; % j counts number of iterations.
+if nargout >= 3
+ flog(j, :) = fold;
+ if nargout >= 4
+ pointlog(j, :) = x;
+ end
+end
+
+% Main optimization loop.
+while (j <= niters)
+
+ % Calculate first and second directional derivatives.
+ if (success == 1)
+ mu = d*gradnew';
+ if (mu >= 0)
+ d = - gradnew;
+ mu = d*gradnew';
+ end
+ kappa = d*d';
+ if kappa < eps
+ options(8) = fnow;
+ return
+ end
+ sigma = sigma0/sqrt(kappa);
+ xplus = x + sigma*d;
+ gplus = feval(gradf, xplus, varargin{:});
+ options(11) = options(11) + 1;
+ theta = (d*(gplus' - gradnew'))/sigma;
+ end
+
+ % Increase effective curvature and evaluate step size alpha.
+ delta = theta + beta*kappa;
+ if (delta <= 0)
+ delta = beta*kappa;
+ beta = beta - theta/kappa;
+ end
+ alpha = - mu/delta;
+
+ % Calculate the comparison ratio.
+ xnew = x + alpha*d;
+ fnew = feval(f, xnew, varargin{:});
+ options(10) = options(10) + 1;
+ Delta = 2*(fnew - fold)/(alpha*mu);
+ if (Delta >= 0)
+ success = 1;
+ nsuccess = nsuccess + 1;
+ x = xnew;
+ fnow = fnew;
+ else
+ success = 0;
+ fnow = fold;
+ end
+
+ if nargout >= 3
+ % Store relevant variables
+ flog(j) = fnow; % Current function value
+ if nargout >= 4
+ pointlog(j,:) = x; % Current position
+ if nargout >= 5
+ scalelog(j) = beta; % Current scale parameter
+ end
+ end
+ end
+ if display > 0
+ fprintf(1, 'Cycle %4d Error %11.6f Scale %e\n', j, fnow, beta);
+ end
+
+ if (success == 1)
+ % Test for termination
+
+ if (max(abs(alpha*d)) < options(2) & max(abs(fnew-fold)) < options(3))
+ options(8) = fnew;
+ return;
+
+ else
+ % Update variables for new position
+ fold = fnew;
+ gradold = gradnew;
+ gradnew = feval(gradf, x, varargin{:});
+ options(11) = options(11) + 1;
+ % If the gradient is zero then we are done.
+ if (gradnew*gradnew' == 0)
+ options(8) = fnew;
+ return;
+ end
+ end
+ end
+
+ % Adjust beta according to comparison ratio.
+ if (Delta < 0.25)
+ beta = min(4.0*beta, betamax);
+ end
+ if (Delta > 0.75)
+ beta = max(0.5*beta, betamin);
+ end
+
+ % Update search direction using Polak-Ribiere formula, or re-start
+ % in direction of negative gradient after nparams steps.
+ if (nsuccess == nparams)
+ d = -gradnew;
+ nsuccess = 0;
+ else
+ if (success == 1)
+ gamma = (gradold - gradnew)*gradnew'/(mu);
+ d = gamma*d - gradnew;
+ end
+ end
+ j = j + 1;
+end
+
+% If we get here, then we haven't terminated in the given number of
+% iterations.
+
+options(8) = fold;
+if (options(1) >= 0)
+ disp(maxitmess);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/som.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/som.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,55 @@
+function net = som(nin, map_size)
+%SOM Creates a Self-Organising Map.
+%
+% Description
+% NET = SOM(NIN, MAP_SIZE) creates a SOM NET with input dimension (i.e.
+% data dimension) NIN and map dimensions MAP_SIZE. Only two-
+% dimensional maps are currently implemented.
+%
+% The fields in NET are
+% type = 'som'
+% nin = number of inputs
+% map_dim = dimension of map (constrained to be 2)
+% map_size = grid size: number of nodes in each dimension
+% num_nodes = number of nodes: the product of values in map_size
+% map = map_dim+1 dimensional array containing nodes
+% inode_dist = map of inter-node distances using Manhatten metric
+%
+% The map contains the node vectors arranged column-wise in the first
+% dimension of the array.
+%
+% See also
+% KMEANS, SOMFWD, SOMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+net.type = 'som';
+net.nin = nin;
+
+% Create Map of nodes
+if round(map_size) ~= map_size | (map_size < 1)
+ error('SOM specification must contain positive integers');
+end
+
+net.map_dim = length(map_size);
+if net.map_dim ~= 2
+ error('SOM is a 2 dimensional map');
+end
+net.num_nodes = prod(map_size);
+% Centres are stored by column as first index of multi-dimensional array.
+% This makes extracting them later more easy.
+% Initialise with rand to create square grid
+net.map = rand([nin, map_size]);
+net.map_size = map_size;
+
+% Crude function to compute inter-node distances
+net.inode_dist = zeros([map_size, net.num_nodes]);
+for m = 1:net.num_nodes
+ node_loc = [1+fix((m-1)/map_size(2)), 1+rem((m-1),map_size(2))];
+ for k = 1:map_size(1)
+ for l = 1:map_size(2)
+ net.inode_dist(k, l, m) = round(max(abs([k l] - node_loc)));
+ end
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/somfwd.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/somfwd.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,30 @@
+function [d2, win_nodes] = somfwd(net, x)
+%SOMFWD Forward propagation through a Self-Organising Map.
+%
+% Description
+% D2 = SOMFWD(NET, X) propagates the data matrix X through a SOM NET,
+% returning the squared distance matrix D2 with dimension NIN by
+% NUM_NODES. The $i$th row represents the squared Euclidean distance
+% to each of the nodes of the SOM.
+%
+% [D2, WIN_NODES] = SOMFWD(NET, X) also returns the indices of the
+% winning nodes for each pattern.
+%
+% See also
+% SOM, SOMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check for consistency
+errstring = consist(net, 'som', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Turn nodes into matrix of centres
+nodes = (reshape(net.map, net.nin, net.num_nodes))';
+% Compute squared distance matrix
+d2 = dist2(x, nodes);
+% Find winning node for each pattern: minimum value in each row
+[w, win_nodes] = min(d2, [], 2);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/sompak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/sompak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function [c] = sompak(net)
+%SOMPAK Combines node weights into one weights matrix.
+%
+% Description
+% C = SOMPAK(NET) takes a SOM data structure NET and combines the node
+% weights into a matrix of centres C where each row represents the node
+% vector.
+%
+% The ordering of the parameters in W is defined by the indexing of the
+% multi-dimensional array NET.MAP.
+%
+% See also
+% SOM, SOMUNPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'som');
+if ~isempty(errstring)
+ error(errstring);
+end
+% Returns map as a sequence of row vectors
+c = (reshape(net.map, net.nin, net.num_nodes))';
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/somtrain.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/somtrain.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,157 @@
+function net = somtrain(net, options, x)
+%SOMTRAIN Kohonen training algorithm for SOM.
+%
+% Description
+% NET = SOMTRAIN(NET, OPTIONS, X) uses Kohonen's algorithm to train a
+% SOM. Both on-line and batch algorithms are implemented. The learning
+% rate (for on-line) and neighbourhood size decay linearly. There is no
+% error function minimised during training (so there is no termination
+% criterion other than the number of epochs), but the sum-of-squares
+% is computed and returned in OPTIONS(8).
+%
+% The optional parameters have the following interpretations.
+%
+% OPTIONS(1) is set to 1 to display error values; also logs learning
+% rate ALPHA and neighbourhood size NSIZE. Otherwise nothing is
+% displayed.
+%
+% OPTIONS(5) determines whether the patterns are sampled randomly with
+% replacement. If it is 0 (the default), then patterns are sampled in
+% order. This is only relevant to the on-line algorithm.
+%
+% OPTIONS(6) determines if the on-line or batch algorithm is used. If
+% it is 1 then the batch algorithm is used. If it is 0 (the default)
+% then the on-line algorithm is used.
+%
+% OPTIONS(14) is the maximum number of iterations (passes through the
+% complete pattern set); default 100.
+%
+% OPTIONS(15) is the final neighbourhood size; default value is the
+% same as the initial neighbourhood size.
+%
+% OPTIONS(16) is the final learning rate; default value is the same as
+% the initial learning rate.
+%
+% OPTIONS(17) is the initial neighbourhood size; default 0.5*maximum
+% map size.
+%
+% OPTIONS(18) is the initial learning rate; default 0.9. This
+% parameter must be positive.
+%
+% See also
+% KMEANS, SOM, SOMFWD
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+% Check arguments for consistency
+errstring = consist(net, 'som', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+% Set number of iterations in convergence phase
+if (~options(14))
+ options(14) = 100;
+end
+niters = options(14);
+
+% Learning rate must be positive
+if (options(18) > 0)
+ alpha_first = options(18);
+else
+ alpha_first = 0.9;
+end
+% Final learning rate must be no greater than initial learning rate
+if (options(16) > alpha_first | options(16) < 0)
+ alpha_last = alpha_first;
+else
+ alpha_last = options(16);
+end
+
+% Neighbourhood size
+if (options(17) >= 0)
+ nsize_first = options(17);
+else
+ nsize_first = max(net.map_dim)/2;
+end
+% Final neighbourhood size must be no greater than initial size
+if (options(15) > nsize_first | options(15) < 0)
+ nsize_last = nsize_first;
+else
+ nsize_last = options(15);
+end
+
+ndata = size(x, 1);
+
+if options(6)
+ % Batch algorithm
+ H = zeros(ndata, net.num_nodes);
+end
+% Put weights into matrix form
+tempw = sompak(net);
+
+% Then carry out training
+j = 1;
+while j <= niters
+ if options(6)
+ % Batch version of algorithm
+ alpha = 0.0;
+ frac_done = (niters - j)/niters;
+ % Compute neighbourhood
+ nsize = round((nsize_first - nsize_last)*frac_done + nsize_last);
+
+ % Find winning node: put weights back into net so that we can
+ % call somunpak
+ net = somunpak(net, tempw);
+ [temp, bnode] = somfwd(net, x);
+ for k = 1:ndata
+ H(k, :) = reshape(net.inode_dist(:, :, bnode(k))<=nsize, ...
+ 1, net.num_nodes);
+ end
+ s = sum(H, 1);
+ for k = 1:net.num_nodes
+ if s(k) > 0
+ tempw(k, :) = sum((H(:, k)*ones(1, net.nin)).*x, 1)/ ...
+ s(k);
+ end
+ end
+ else
+ % On-line version of algorithm
+ if options(5)
+ % Randomise order of pattern presentation: with replacement
+ pnum = ceil(rand(ndata, 1).*ndata);
+ else
+ pnum = 1:ndata;
+ end
+ % Cycle through dataset
+ for k = 1:ndata
+ % Fraction done
+ frac_done = (((niters+1)*ndata)-(j*ndata + k))/((niters+1)*ndata);
+ % Compute learning rate
+ alpha = (alpha_first - alpha_last)*frac_done + alpha_last;
+ % Compute neighbourhood
+ nsize = round((nsize_first - nsize_last)*frac_done + nsize_last);
+ % Find best node
+ pat_diff = ones(net.num_nodes, 1)*x(pnum(k), :) - tempw;
+ [temp, bnode] = min(sum(abs(pat_diff), 2));
+
+ % Now update neighbourhood
+ neighbourhood = (net.inode_dist(:, :, bnode) <= nsize);
+ tempw = tempw + ...
+ ((alpha*(neighbourhood(:)))*ones(1, net.nin)).*pat_diff;
+ end
+ end
+ if options(1)
+ % Print iteration information
+ fprintf(1, 'Iteration %d; alpha = %f, nsize = %f. ', j, alpha, ...
+ nsize);
+ % Print sum squared error to nearest node
+ d2 = dist2(tempw, x);
+ fprintf(1, 'Error = %f\n', sum(min(d2)));
+ end
+ j = j + 1;
+end
+
+net = somunpak(net, tempw);
+options(8) = sum(min(dist2(tempw, x)));
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/somunpak.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/somunpak.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,23 @@
+function net = somunpak(net, w)
+%SOMUNPAK Replaces node weights in SOM.
+%
+% Description
+% NET = SOMUNPAK(NET, W) takes a SOM data structure NET and weight
+% matrix W (each node represented by a row) and puts the nodes back
+% into the multi-dimensional array NET.MAP.
+%
+% The ordering of the parameters in W is defined by the indexing of the
+% multi-dimensional array NET.MAP.
+%
+% See also
+% SOM, SOMPAK
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+errstring = consist(net, 'som');
+if ~isempty(errstring)
+ error(errstring);
+end
+% Put weights back into network data structure
+net.map = reshape(w', [net.nin net.map_size]);
\ No newline at end of file
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlab3.3/xor.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlab3.3/xor.dat Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,15 @@
+nin 2
+nout 1
+ndata 12
+1 0 1
+0 1 1
+0 0 0
+1 1 0
+1 0 1
+0 1 1
+0 0 0
+1 1 0
+1 0 1
+0 1 1
+0 0 0
+1 1 0
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/CVS/Entries
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/CVS/Entries Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+/README.txt/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/demgmm1_movie.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/evidence_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/glmerr_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/glmgrad_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/glmhess_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/glmtrain_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/gmm1.avi/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/gmmem2.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/gmmem_multi_restart.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/kmeans_demo.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/mlperr_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/mlpgrad_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/mlphdotv_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/mlphess_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/neterr_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/netgrad_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/nethess_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/netopt_weighted.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+/process_options.m/1.1.1.1/Wed Apr 27 17:59:16 2005//
+D
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/CVS/Repository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/CVS/Repository Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+FullBNT/netlabKPM
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/CVS/Root
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/CVS/Root Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,1 @@
+:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/README.txt Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,10 @@
+This directory contains some small modifications/additions to
+netlab, http://www.ncrg.aston.ac.uk/netlab/.
+
+- gmmem2 allows a prior on the covariance, and lets you visualize
+intermediate results of the learning
+- gmmem_multi_restart is self-explanatory
+
+All the _weighted functions were written by Pierpaolo Brutti.
+They are needed by BNT (softmax_CPD and mlp_CPD
+maximize_params method).
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/demgmm1_movie.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/demgmm1_movie.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,103 @@
+%DEMGMM1 Demonstrate EM for Gaussian mixtures.
+%
+% Description
+% This script demonstrates the use of the EM algorithm to fit a mixture
+% of Gaussians to a set of data using maximum likelihood. A colour
+% coding scheme is used to illustrate the evaluation of the posterior
+% probabilities in the E-step of the EM algorithm.
+%
+% See also
+% DEMGMM2, DEMGMM3, DEMGMM4, GMM, GMMEM, GMMPOST
+%
+
+% Copyright (c) Ian T Nabney (1996-2001)
+
+mov = avifile('movies/gmm1.avi','fps',1 );
+
+% Generate the data
+randn('state', 0); rand('state', 0);
+gmix = gmm(2, 2, 'spherical');
+ndat1 = 20; ndat2 = 20; ndata = ndat1+ndat2;
+gmix.centres = [0.3 0.3; 0.7 0.7];
+gmix.covars = [0.01 0.01];
+x = gmmsamp(gmix, ndata);
+
+h = figure;
+hd = plot(x(:, 1), x(:, 2), '.g', 'markersize', 30);
+hold on; axis([0 1 0 1]); axis square; set(gca, 'box', 'on');
+ht = text(0.5, 1.05, 'Data', 'horizontalalignment', 'center');
+
+
+% Set up mixture model
+ncentres = 2; input_dim = 2;
+mix = gmm(input_dim, ncentres, 'spherical');
+
+% Initialise the mixture model
+mix.centres = [0.2 0.8; 0.8, 0.2];
+mix.covars = [0.01 0.01];
+
+% Plot the initial model
+ncirc = 30; theta = linspace(0, 2*pi, ncirc);
+xs = cos(theta); ys = sin(theta);
+xvals = mix.centres(:, 1)*ones(1,ncirc) + sqrt(mix.covars')*xs;
+yvals = mix.centres(:, 2)*ones(1,ncirc) + sqrt(mix.covars')*ys;
+hc(1)=line(xvals(1,:), yvals(1,:), 'color', 'r');
+hc(2)=line(xvals(2,:), yvals(2,:), 'color', 'b');
+set(ht, 'string', 'Initial Configuration');
+figure(h);
+mov = addframe(mov, getframe(gcf));
+mov = addframe(mov, getframe(gcf));
+
+% Initial E-step.
+set(ht, 'string', 'E-step');
+post = gmmpost(mix, x);
+dcols = [post(:,1), zeros(ndata, 1), post(:,2)];
+delete(hd);
+for i = 1 : ndata
+ hd(i) = plot(x(i, 1), x(i, 2), 'color', dcols(i,:), ...
+ 'marker', '.', 'markersize', 30);
+end
+
+% M-step.
+set(ht, 'string', 'M-step');
+options = foptions;
+options(14) = 1; % A single iteration
+options(1) = -1; % Switch off all messages, including warning
+mix = gmmem(mix, x, options);
+delete(hc);
+xvals = mix.centres(:, 1)*ones(1,ncirc) + sqrt(mix.covars')*xs;
+yvals = mix.centres(:, 2)*ones(1,ncirc) + sqrt(mix.covars')*ys;
+hc(1)=line(xvals(1,:), yvals(1,:), 'color', 'r');
+hc(2)=line(xvals(2,:), yvals(2,:), 'color', 'b');
+figure(h);
+mov = addframe(mov, getframe(gcf));
+mov = addframe(mov, getframe(gcf));
+
+% Loop over EM iterations.
+numiters = 9;
+for n = 1 : numiters
+
+ set(ht, 'string', 'E-step');
+ post = gmmpost(mix, x);
+ dcols = [post(:,1), zeros(ndata, 1), post(:,2)];
+ delete(hd);
+ for i = 1 : ndata
+ hd(i) = plot(x(i, 1), x(i, 2), 'color', dcols(i,:), ...
+ 'marker', '.', 'markersize', 30);
+ end
+ %pause(1)
+
+ set(ht, 'string', 'M-step');
+ [mix, options] = gmmem(mix, x, options);
+ fprintf(1, 'Cycle %4d Error %11.6f\n', n, options(8));
+ delete(hc);
+ xvals = mix.centres(:, 1)*ones(1,ncirc) + sqrt(mix.covars')*xs;
+ yvals = mix.centres(:, 2)*ones(1,ncirc) + sqrt(mix.covars')*ys;
+ hc(1)=line(xvals(1,:), yvals(1,:), 'color', 'r');
+ hc(2)=line(xvals(2,:), yvals(2,:), 'color', 'b');
+ pause(1)
+
+ mov = addframe(mov, getframe(gcf));
+end
+
+mov = close(mov);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/evidence_weighted.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/evidence_weighted.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,104 @@
+function [net, gamma, logev] = evidence_weighted(net, x, t, eso_w, num)
+%EVIDENCE Re-estimate hyperparameters using evidence approximation.
+%
+% Description
+% [NET] = EVIDENCE(NET, X, T) re-estimates the hyperparameters ALPHA
+% and BETA by applying Bayesian re-estimation formulae for NUM
+% iterations. The hyperparameter ALPHA can be a simple scalar
+% associated with an isotropic prior on the weights, or can be a vector
+% in which each component is associated with a group of weights as
+% defined by the INDEX matrix in the NET data structure. These more
+% complex priors can be set up for an MLP using MLPPRIOR. Initial
+% values for the iterative re-estimation are taken from the network
+% data structure NET passed as an input argument, while the return
+% argument NET contains the re-estimated values.
+%
+% [NET, GAMMA, LOGEV] = EVIDENCE(NET, X, T, NUM) allows the re-
+% estimation formula to be applied for NUM cycles in which the re-
+% estimated values for the hyperparameters from each cycle are used to
+% re-evaluate the Hessian matrix for the next cycle. The return value
+% GAMMA is the number of well-determined parameters and LOGEV is the
+% log of the evidence.
+%
+% See also
+% MLPPRIOR, NETGRAD, NETHESS, DEMEV1, DEMARD
+%
+
+% Copyright (c) Ian T Nabney (1996-9)
+
+errstring = consist(net, '', x, t);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+ndata = size(x, 1);
+if nargin == 4
+ num = 1;
+end
+
+if isfield(net,'beta')
+ beta = net.beta;
+else
+ beta = 1;
+end;
+
+% Extract weights from network
+pakstr = [net.type, 'pak'];
+w = feval(pakstr, net);
+
+% Evaluate data-dependent contribution to the Hessian matrix.
+[h, dh] = nethess_weighted(w, net, x, t, eso_w);
+
+% Now set the negative eigenvalues to zero.
+[evec, evl] = eig(dh);
+evl = evl.*(evl > 0);
+% safe_evl is used to avoid taking log of zero
+safe_evl = evl + eps.*(evl <= 0);
+
+% Do the re-estimation.
+for k = 1 : num
+ [e, edata, eprior] = neterr_weighted(w, net, x, t, eso_w);
+ h = nethess_weighted(w, net, x, t, eso_w, dh);
+ % Re-estimate alpha.
+ if size(net.alpha) == [1 1]
+ % Evaluate number of well-determined parameters.
+ if k == 1
+ % Form vector of eigenvalues
+ evl = diag(evl);
+ safe_evl = diag(safe_evl);
+ end
+ B = beta*evl;
+ gamma = sum(B./(B + net.alpha));
+ net.alpha = 0.5*gamma/eprior;
+
+ % Partially evaluate log evidence
+ logev = e - 0.5*sum(log(safe_evl)) + 0.5*net.nwts*log(net.alpha) - ...
+ 0.5*ndata*log(2*pi);
+ else
+ ngroups = size(net.alpha, 1);
+ gams = zeros(1, ngroups);
+ logas = zeros(1, ngroups);
+ traces = zeros(1, ngroups);
+ % Reconstruct data hessian with negative eigenvalues set to zero.
+ dh = evec*evl*evec';
+ hinv = inv(nethess_weighted(w, net, x, t, eso_w, dh));
+ for m = 1 : ngroups
+ group_nweights = sum(net.index(:, m));
+ gams(m) = group_nweights - ...
+ net.alpha(m)*sum(diag(hinv).*net.index(:,m));
+ net.alpha(m) = real(gams(m)/(2*eprior(m)));
+ % Weight alphas by number of weights in group
+ logas(m) = 0.5*group_nweights*log(net.alpha(m));
+ % Compute sum of evalues corresponding to group
+ traces(m) = sum(log(safe_evl*net.index(:,m)));
+ end
+ gamma = sum(gams, 2);
+ logev = e - 0.5*sum(traces) + sum(logas) - 0.5*ndata*log(2*pi);
+ end
+ % Re-estimate beta.
+ if isfield(net, 'beta')
+ net.beta = 0.5*(net.nout*ndata - gamma)/edata;
+ end
+ logev = logev + 0.5*ndata*log(beta);
+end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/glmerr_weighted.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/glmerr_weighted.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,57 @@
+function [e, edata, eprior, y, a] = glmerr_weighted(net, x, t, eso_w)
+%GLMERR Evaluate error function for generalized linear model.
+%
+% Description
+% E = GLMERR(NET, X, T) takes a generalized linear model data
+% structure NET together with a matrix X of input vectors and a matrix
+% T of target vectors, and evaluates the error function E. The choice
+% of error function corresponds to the output unit activation function.
+% Each row of X corresponds to one input vector and each row of T
+% corresponds to one target vector.
+%
+% [E, EDATA, EPRIOR, Y, A] = GLMERR(NET, X, T) also returns the data
+% and prior components of the total error.
+%
+% [E, EDATA, EPRIOR, Y, A] = GLMERR(NET, X) also returns a matrix Y
+% giving the outputs of the models and a matrix A giving the summed
+% inputs to each output unit, where each row corresponds to one
+% pattern.
+%
+% See also
+% GLM, GLMPAK, GLMUNPAK, GLMFWD, GLMGRAD, GLMTRAIN
+%
+
+% Copyright (c) Ian T Nabney (1996-9)
+
+% Check arguments for consistency
+errstring = consist(net, 'glm', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+
+[y, a] = glmfwd(net, x);
+
+%switch net.actfn
+ switch net.outfn
+
+ case 'softmax' % Softmax outputs
+
+ nout = size(a,2);
+ % Ensure that sum(exp(a), 2) does not overflow
+ maxcut = log(realmax) - log(nout);
+ % Ensure that exp(a) > 0
+ mincut = log(realmin);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ temp = exp(a);
+ y = temp./(sum(temp, 2)*ones(1,nout));
+ % Ensure that log(y) is computable
+ y(y errold
+ errold = err;
+ w = wold;
+ options(8) = err;
+ fprintf(1, 'Error has increased: terminating\n')
+ return;
+ end
+ if test & n > 1
+ if (max(abs(w - wold)) < options(2) & abs(err-errold) < options(3))
+ options(8) = err;
+ return;
+ else
+ errold = err;
+ wold = w;
+ end
+ end
+end
+
+options(8) = err;
+if (options(1) > 0)
+ disp('Warning: Maximum number of iterations has been exceeded');
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/gmm1.avi
Binary file toolboxes/FullBNT-1.0.7/netlabKPM/gmm1.avi has changed
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/gmmem2.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/gmmem2.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,186 @@
+function [mix, num_iter, ll] = gmmem_kpm(mix, x, varargin)
+%GMMEM_KPM Like GMMEM, but with additional optional arguments
+% function [mix, num_iter, ll] = gmmem_kpm(mix, x, varargin)
+%
+% Input:
+% mix - structure created by gmminit or gmmem_multi_restart
+% data - each row is an example
+%
+% Output:
+% mix - modified structure
+% num_iter - number of iterations needed to reach convergence
+% ll - final log likelihood
+%
+% [ ... ] = gmmem_kpm(..., 'param1',val1, 'param2',val2, ...) allows you to
+% specify optional parameter name/value pairs.
+% Parameters are below [default value in brackets]
+%
+% 'max_iter' - maximum number of EM iterations [10]
+% 'll_thresh' - change in log-likelihood threshold for convergence [1e-2]
+% 'verbose' - 1 means display output while running [0]
+% 'prior_cov' - this will be added to each estimated covariance
+% to prevent singularities [1e-3*eye(d)]
+% 'fn' - this function, if non-empty, will be called at every iteration
+% (e.g., to display the parameters as they evolve) [ [] ]
+% The fn is called as fn(mix, x, iter_num, fnargs).
+% It is also called before the iteration starts as
+% fn(mix, x, -1, fnargs), which can be used to initialize things.
+% 'fnargs' - additional arguments to be passed to fn [ {} ]
+%
+% Modified by Kevin P Murphy, 29 Dec 2002
+
+
+% Check that inputs are consistent
+errstring = consist(mix, 'gmm', x);
+if ~isempty(errstring)
+ error(errstring);
+end
+
+[ndata, xdim] = size(x);
+
+[max_iter, ll_thresh, verbose, prior_cov, fn, fnargs] = ...
+ process_options(varargin, ...
+ 'max_iter', 10, 'll_thresh', 1e-2, 'verbose', 1, ...
+ 'prior_cov', 1e-3*eye(xdim), 'fn', [], 'fnargs', {});
+
+options = foptions;
+if verbose, options(1)=1; else options(1)=-1; end
+options(14) = max_iter;
+options(3) = ll_thresh;
+
+
+% Sort out the options
+if (options(14))
+ niters = options(14);
+else
+ niters = 100;
+end
+
+display = options(1);
+test = 0;
+if options(3) > 0.0
+ test = 1; % Test log likelihood for termination
+end
+
+check_covars = 0;
+if options(5) >= 1
+ if display >= 0
+ disp('check_covars is on');
+ end
+ check_covars = 1; % Ensure that covariances don't collapse
+ MIN_COVAR = eps; % Minimum singular value of covariance matrix
+ init_covars = mix.covars;
+end
+
+mix0 = mix; % save init values for debugging
+
+if ~isempty(fn)
+ feval(fn, mix, x, -1, fnargs{:});
+end
+
+% Main loop of algorithm
+for n = 1:niters
+
+ % Calculate posteriors based on old parameters
+ [post, act] = gmmpost(mix, x);
+
+ % Calculate error value if needed
+ if (display | test)
+ prob = act*(mix.priors)';
+ % Error value is negative log likelihood of data
+ e = - sum(log(prob + eps));
+ if display > 0
+ fprintf(1, 'Cycle %4d Error %11.6f\n', n, e);
+ end
+ if test
+ if (n > 1 & abs(e - eold) < options(3))
+ options(8) = e;
+ ll = -e;
+ num_iter = n;
+ return; %%%%%%%%%%%%%%%% Exit here if converged
+ else
+ eold = e;
+ end
+ end
+ end
+
+ if ~isempty(fn)
+ feval(fn, mix, x, n, fnargs{:});
+ end
+
+ % Adjust the new estimates for the parameters
+ new_pr = sum(post, 1);
+ new_c = post' * x;
+
+ % Now move new estimates to old parameter vectors
+ mix.priors = new_pr ./ ndata;
+
+ mix.centres = new_c ./ (new_pr' * ones(1, mix.nin));
+
+ switch mix.covar_type
+ case 'spherical'
+ n2 = dist2(x, mix.centres);
+ for j = 1:mix.ncentres
+ v(j) = (post(:,j)'*n2(:,j));
+ end
+ mix.covars = ((v./new_pr) + sum(diag(prior_cov)))./mix.nin;
+ if check_covars
+ % Ensure that no covariance is too small
+ for j = 1:mix.ncentres
+ if mix.covars(j) < MIN_COVAR
+ mix.covars(j) = init_covars(j);
+ end
+ end
+ end
+ case 'diag'
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j,:));
+ wts = (post(:,j)*ones(1, mix.nin));
+ mix.covars(j,:) = sum((diffs.*diffs).*wts + prior_cov, 1)./new_pr(j);
+ end
+ if check_covars
+ % Ensure that no covariance is too small
+ for j = 1:mix.ncentres
+ if min(mix.covars(j,:)) < MIN_COVAR
+ mix.covars(j,:) = init_covars(j,:);
+ end
+ end
+ end
+ case 'full'
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j,:));
+ diffs = diffs.*(sqrt(post(:,j))*ones(1, mix.nin));
+ mix.covars(:,:,j) = (diffs'*diffs + prior_cov)/new_pr(j);
+ end
+ if check_covars
+ % Ensure that no covariance is too small
+ for j = 1:mix.ncentres
+ if min(svd(mix.covars(:,:,j))) < MIN_COVAR
+ mix.covars(:,:,j) = init_covars(:,:,j);
+ end
+ end
+ end
+ case 'ppca'
+ for j = 1:mix.ncentres
+ diffs = x - (ones(ndata, 1) * mix.centres(j,:));
+ diffs = diffs.*(sqrt(post(:,j))*ones(1, mix.nin));
+ [mix.covars(j), mix.U(:,:,j), mix.lambda(j,:)] = ...
+ ppca((diffs'*diffs)/new_pr(j), mix.ppca_dim);
+ end
+ if check_covars
+ if mix.covars(j) < MIN_COVAR
+ mix.covars(j) = init_covars(j);
+ end
+ end
+ otherwise
+ error(['Unknown covariance type ', mix.covar_type]);
+ end
+end
+
+ll = sum(log(gmmprob(mix, x)));
+num_iter = n;
+
+%if (display >= 0)
+% disp('Warning: Maximum number of iterations has been exceeded');
+%end
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/gmmem_multi_restart.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/gmmem_multi_restart.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,65 @@
+function [means, covs, weights, ll] = gmmem_multi_restart(K, data, varargin)
+% GMMEM_MULTI_RESTART Multiple restart wrapper for gmmem_kpm
+% function [means, covs, weights, ll] = gmmem_multi_restart(K, data, varargin)
+%
+% Input:
+% K = number of mixture components
+% data(i,:) is the i'th example (feature vector)
+%
+% Output:
+% The parameters for the k'th mixture component, k=1:K, are
+% means(k,:), covs(:,:,k) and weights(k)
+%
+% [ ... ] = gmmem_multi_restart(..., 'param1',val1, 'param2',val2, ...)
+% allows you to specify optional parameter name/value pairs.
+% Parameters are below [default value in brackets]
+%
+% 'nrestarts' - number of EM restarts [2]
+% 'cov_type' - 'full', 'diag' or 'spherical' ['full']
+% 'init_cov' - the initial covariance matrix [0.1*cov(data) for each k]
+% 'init_means' - [] means sample from randn(); otherwise, use
+% init_means(k,:,r) for the k'th comp. on the r'th restart [ [] ]
+% 'restartfn' - this function, if non-empty, will be called before/after every restart
+% (e.g., to display the parameters as they evolve) [ [] ]
+% The fn is called as fn(mix{r}, data, restart_num, niter, outerfnargs)
+% where niter is the number of iterations performed (0 initially)
+% 'restartfnargs' - additional arguments to be passed to restartfn [ {} ]
+%
+% Optional arguments for gmmem_kpm are passed through.
+%
+% Written by Kevin P Murphy, 30 Dec 2002
+
+[ndata nfeatures] = size(data);
+
+%Cinit = repmat(0.1*diag(diag(cov(data))), [1 1 K]);
+Cinit = repmat(0.1*cov(data), [1 1 K]);
+
+[nrestarts, init_cov, init_means, cov_type, ...
+ restartfn, restartfnargs, unused_args] = ...
+ process_options(varargin, ...
+ 'nrestarts', 2, 'init_cov', Cinit, 'init_means', [], ...
+ 'cov_type', 'full', 'restartfn', [], 'restartfnargs', {});
+
+mix = cell(1, nrestarts);
+cost = inf*ones(1,nrestarts);
+
+for r=1:nrestarts
+ mix{r} = gmm(nfeatures, K, cov_type); % random centers
+ if ~isempty(init_means), mix{r}.centres = init_means(:,:,r); end
+ mix{r}.covars = init_cov;
+ if ~isempty(restartfn)
+ feval(restartfn, mix{r}, data, r, 0, restartfnargs{:});
+ end
+ [mix{r}, niter, ll] = gmmem_kpm(mix{r}, data, unused_args{:});
+ cost(r) = -ll; %-sum(log(gmmprob(mix{r}, data)));
+ if ~isempty(restartfn)
+ feval(restartfn, mix{r}, data, r, niter, restartfnargs{:});
+ end
+end
+
+[nll, bestr] = min(cost);
+fprintf('best r = %d\n', bestr);
+ll = -nll;
+means = mix{bestr}.centres;
+covs = mix{bestr}.covars;
+weights = mix{bestr}.priors;
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/kmeans_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/kmeans_demo.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,76 @@
+function kmeans_demo()
+
+% Generate T points from K=5 1D clusters, and try to recover the cluster
+% centers using k-means.
+% Requires BNT, netlab and the matlab stats toolbox v4.
+
+K = 5;
+ndim = 1;
+true_centers = 1:K;
+sigma = 1e-6;
+T = 100;
+% data(t,:) is the t'th data point
+data = zeros(T, ndim);
+% ndx(t) = i means the t'th data point is sample from cluster i
+%ndx = sample_discrete(normalise(ones(1,K)));
+ndx = [1*ones(1,20) 2*ones(1,20) 3*ones(1,20) 4*ones(1,20) 5*ones(1,20)];
+for t=1:T
+ data(t) = sample_gaussian(true_centers(ndx(t)), sigma, 1);
+end
+plot(1:T, data, 'x')
+
+
+
+% set the centers randomly from Gauss(0)
+mix = gmm(ndim, K, 'spherical');
+h = plot_centers_as_lines(mix, [], T);
+
+if 0
+% Place initial centers at K data points chosen at random, but add some noise
+choose_ndx = randperm(T);
+choose_ndx = choose_ndx(1:K);
+init_centers = data(choose_ndx) + sample_gaussian(0, 0.1, K);
+mix.centres = init_centers;
+h = plot_centers_as_lines(mix, h, T);
+end
+
+if 0
+% update centers using netlab k-means
+options = foptions;
+niter = 10;
+options(14) = niter;
+mix = gmminit(mix, data, options);
+h = plot_centers_as_lines(mix, h, T);
+end
+
+% use matlab stats toolbox k-means with multiple restarts
+nrestarts = 5;
+[idx, centers] = kmeans(data, K, 'replicates', nrestarts, ...
+ 'emptyAction', 'singleton', 'display', 'iter');
+mix.centres = centers;
+h = plot_centers_as_lines(mix, h, T);
+
+% fine tune with EM; compute covariances of each cluster
+options = foptions;
+niter = 20;
+options(1) = 1; % display cost fn at each iter
+options(14) = niter;
+mix = gmmem(mix, data, options);
+h = plot_centers_as_lines(mix, h, T);
+
+%%%%%%%%%
+function h = plot_centers_as_lines(mix, h, T)
+
+K = mix.ncentres;
+hold on
+if isempty(h)
+ for k=1:K
+ h(k)=line([0 T], [mix.centres(k) mix.centres(k)]);
+ end
+else
+ for k=1:K
+ set(h(k), 'xdata', [0 T], 'ydata', [mix.centres(k) mix.centres(k)]);
+ end
+end
+hold off
+
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/mlperr_weighted.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/mlperr_weighted.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,68 @@
+function [e, edata, eprior] = mlperr_weighted(net, x, t, eso_w)
+%MLPERR Evaluate error function for 2-layer network.
+%
+% Description
+% E = MLPERR(NET, X, T) takes a network data structure NET together
+% with a matrix X of input vectors and a matrix T of target vectors,
+% and evaluates the error function E. The choice of error function
+% corresponds to the output unit activation function. Each row of X
+% corresponds to one input vector and each row of T corresponds to one
+% target vector.
+%
+% [E, EDATA, EPRIOR] = MLPERR(NET, X, T) additionally returns the data
+% and prior components of the error, assuming a zero mean Gaussian
+% prior on the weights with inverse variance parameters ALPHA and BETA
+% taken from the network data structure NET.
+%
+% See also
+% MLP, MLPPAK, MLPUNPAK, MLPFWD, MLPBKP, MLPGRAD
+%
+
+% Copyright (c) Ian T Nabney (1996-9)
+
+% Check arguments for consistency
+errstring = consist(net, 'mlp', x, t);
+if ~isempty(errstring);
+ error(errstring);
+end
+[y, z, a] = mlpfwd(net, x);
+
+switch net.actfn
+
+ case 'linear' %Linear outputs
+
+ edata = 0.5*sum(sum((y - t).^2));
+
+ case 'logistic' % Logistic outputs
+
+ % Ensure that log(1-y) is computable: need exp(a) > eps
+ maxcut = -log(eps);
+ % Ensure that log(y) is computable
+ mincut = -log(1/realmin - 1);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ y = 1./(1 + exp(-a));
+ edata = - sum(sum(t.*log(y) + (1 - t).*log(1 - y)));
+
+ case 'softmax' % Softmax outputs
+
+ nout = size(a,2);
+ % Ensure that sum(exp(a), 2) does not overflow
+ maxcut = log(realmax) - log(nout);
+ % Ensure that exp(a) > 0
+ mincut = log(realmin);
+ a = min(a, maxcut);
+ a = max(a, mincut);
+ temp = exp(a);
+ y = temp./(sum(temp, 2)*ones(1,nout));
+ % Ensure that log(y) is computable
+ y(y 1
+ for i = 2:nargout
+ varargout{i-1} = s{i};
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/netgrad_weighted.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/netgrad_weighted.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,21 @@
+function g = netgrad_weighted(w, net, x, t, eso_w)
+%NETGRAD Evaluate network error gradient for generic optimizers
+%
+% Description
+%
+% G = NETGRAD(W, NET, X, T) takes a weight vector W and a network data
+% structure NET, together with the matrix X of input vectors and the
+% matrix T of target vectors, and returns the gradient of the error
+% function evaluated at W.
+%
+% See also
+% MLP, NETERR, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-9)
+
+gradstr = [net.type, 'grad_weighted'];
+
+net = netunpak(net, w);
+
+g = feval(gradstr, net, x, t, eso_w);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/nethess_weighted.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/nethess_weighted.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,29 @@
+function [h, varargout] = nethess_weighted(w, net, x, t, eso_w, varargin)
+%NETHESS Evaluate network Hessian
+%
+% Description
+%
+% H = NETHESS(W, NET, X, T) takes a weight vector W and a network data
+% structure NET, together with the matrix X of input vectors and the
+% matrix T of target vectors, and returns the value of the Hessian
+% evaluated at W.
+%
+% [E, VARARGOUT] = NETHESS(W, NET, X, T, VARARGIN) also returns any
+% additional return values from the network Hessian function, and
+% passes additional arguments to that function.
+%
+% See also
+% NETERR, NETGRAD, NETOPT
+%
+
+% Copyright (c) Ian T Nabney (1996-9)
+
+hess_str = [net.type, 'hess_weighted'];
+
+net = netunpak(net, w);
+
+[s{1:nargout}] = feval(hess_str, net, x, t, eso_w, varargin{:});
+h = s{1};
+for i = 2:nargout
+ varargout{i-1} = s{i};
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/netopt_weighted.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/netopt_weighted.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,52 @@
+function [net, options, varargout] = netopt_weighted(net, options, x, t, eso_w, alg);
+%NETOPT Optimize the weights in a network model.
+%
+% Description
+%
+% NETOPT is a helper function which facilitates the training of
+% networks using the general purpose optimizers as well as sampling
+% from the posterior distribution of parameters using general purpose
+% Markov chain Monte Carlo sampling algorithms. It can be used with any
+% function that searches in parameter space using error and gradient
+% functions.
+%
+% [NET, OPTIONS] = NETOPT(NET, OPTIONS, X, T, ALG) takes a network
+% data structure NET, together with a vector OPTIONS of parameters
+% governing the behaviour of the optimization algorithm, a matrix X of
+% input vectors and a matrix T of target vectors, and returns the
+% trained network as well as an updated OPTIONS vector. The string ALG
+% determines which optimization algorithm (CONJGRAD, QUASINEW, SCG,
+% etc.) or Monte Carlo algorithm (such as HMC) will be used.
+%
+% [NET, OPTIONS, VARARGOUT] = NETOPT(NET, OPTIONS, X, T, ALG) also
+% returns any additional return values from the optimisation algorithm.
+%
+% See also
+% NETGRAD, BFGS, CONJGRAD, GRADDESC, HMC, SCG
+%
+
+% Copyright (c) Ian T Nabney (1996-9)
+
+optstring = [alg, '(''neterr_weighted'', w, options, ''netgrad_weighted'', net, x, t, eso_w)'];
+
+% Extract weights from network as single vector
+w = netpak(net);
+
+% Carry out optimisation
+[s{1:nargout}] = eval(optstring);
+w = s{1};
+
+if nargout > 1
+ options = s{2};
+
+ % If there are additional arguments, extract them
+ nextra = nargout - 2;
+ if nextra > 0
+ for i = 1:nextra
+ varargout{i} = s{i+2};
+ end
+ end
+end
+
+% Pack the weights back into the network
+net = netunpak(net, w);
diff -r 000000000000 -r e9a9cd732c1e toolboxes/FullBNT-1.0.7/netlabKPM/process_options.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/netlabKPM/process_options.m Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,132 @@
+% PROCESS_OPTIONS - Processes options passed to a Matlab function.
+% This function provides a simple means of
+% parsing attribute-value options. Each option is
+% named by a unique string and is given a default
+% value.
+%
+% Usage: [var1, var2, ..., varn[, unused]] = ...
+% process_options(args, ...
+% str1, def1, str2, def2, ..., strn, defn)
+%
+% Arguments:
+% args - a cell array of input arguments, such
+% as that provided by VARARGIN. Its contents
+% should alternate between strings and
+% values.
+% str1, ..., strn - Strings that are associated with a
+% particular variable
+% def1, ..., defn - Default values returned if no option
+% is supplied
+%
+% Returns:
+% var1, ..., varn - values to be assigned to variables
+% unused - an optional cell array of those
+% string-value pairs that were unused;
+% if this is not supplied, then a
+% warning will be issued for each
+% option in args that lacked a match.
+%
+% Examples:
+%
+% Suppose we wish to define a Matlab function 'func' that has
+% required parameters x and y, and optional arguments 'u' and 'v'.
+% With the definition
+%
+% function y = func(x, y, varargin)
+%
+% [u, v] = process_options(varargin, 'u', 0, 'v', 1);
+%
+% calling func(0, 1, 'v', 2) will assign 0 to x, 1 to y, 0 to u, and 2
+% to v. The parameter names are insensitive to case; calling
+% func(0, 1, 'V', 2) has the same effect. The function call
+%
+% func(0, 1, 'u', 5, 'z', 2);
+%
+% will result in u having the value 5 and v having value 1, but
+% will issue a warning that the 'z' option has not been used. On
+% the other hand, if func is defined as
+%
+% function y = func(x, y, varargin)
+%
+% [u, v, unused_args] = process_options(varargin, 'u', 0, 'v', 1);
+%
+% then the call func(0, 1, 'u', 5, 'z', 2) will yield no warning,
+% and unused_args will have the value {'z', 2}. This behaviour is
+% useful for functions with options that invoke other functions
+% with options; all options can be passed to the outer function and
+% its unprocessed arguments can be passed to the inner function.
+
+% Copyright (C) 2002 Mark A. Paskin
+%
+% This program is free software; you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation; either version 2 of the License, or
+% (at your option) any later version.
+%
+% This program is distributed in the hope that it will be useful, but
+% WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this program; if not, write to the Free Software
+% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+% USA.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+function [varargout] = process_options(args, varargin)
+
+% Check the number of input arguments
+n = length(varargin);
+if (mod(n, 2))
+ error('Each option must be a string/value pair.');
+end
+
+% Check the number of supplied output arguments
+if (nargout < (n / 2))
+ error('Insufficient number of output arguments given');
+elseif (nargout == (n / 2))
+ warn = 1;
+ nout = n / 2;
+else
+ warn = 0;
+ nout = n / 2 + 1;
+end
+
+% Set outputs to be defaults
+varargout = cell(1, nout);
+for i=2:2:n
+ varargout{i/2} = varargin{i};
+end
+
+% Now process all arguments
+nunused = 0;
+for i=1:2:length(args)
+ found = 0;
+ for j=1:2:n
+ if strcmpi(args{i}, varargin{j})
+ varargout{(j + 1)/2} = args{i + 1};
+ found = 1;
+ break;
+ end
+ end
+ if (~found)
+ if (warn)
+ warning(sprintf('Option ''%s'' not used.', args{i}));
+ args{i}
+ else
+ nunused = nunused + 1;
+ unused{2 * nunused - 1} = args{i};
+ unused{2 * nunused} = args{i + 1};
+ end
+ end
+end
+
+% Assign the unused arguments
+if (~warn)
+ if (nunused)
+ varargout{nout} = unused;
+ else
+ varargout{nout} = cell(0);
+ end
+end
diff -r 000000000000 -r e9a9cd732c1e toolboxes/MIRtoolbox1.3.2/AuditoryToolbox/AuditoryToolboxTechReport.pdf
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/MIRtoolbox1.3.2/AuditoryToolbox/AuditoryToolboxTechReport.pdf Tue Feb 10 15:05:51 2015 +0000
@@ -0,0 +1,12278 @@
+%PDF-1.1
%âãÏÓ
+1 0 obj
<<
/CreationDate (D:19981111211536)
/Producer (Acrobat Distiller 3.01 for Power Macintosh)
/Creator (FrameMaker 5.5 PowerPC: LaserWriter 8 8.6)
/Author (Malcolm)
/Title (AuditoryToolboxTechReport)
>>
endobj
3 0 obj
<<
/Length 35946
/Filter [/ASCII85Decode /LZWDecode]
>>
stream
+J.RTgd+;fnAL1"]5?m(6BiR!.kJ8R=b6B.YjH(5d>/-lF?Eh",pEtApV&]dh0
+=B%A7703K]:t7+QL^m;@W4F`9)i]QhBMfCQ5rtDC-UW1#o*RFGc%[HgibLIk:kiH\
+Kb>G+UC%2_GTdt]$&+"$;45\15Ve#3j!@7OG%EM":*L@Moa#FqT?g!e6(led%2Om+
+_[JB;)I!me75_>=-C7F@MfP'QTVQ$P(^_`M1O`+e<\BXD!X@Po]t
+=6;;#,j$11[14p$6'1.m/JsHN_i_ahBjeu=$Gqq.KZYajS?p+16-Rbq2d=6^SQlrV
+$AMi?]/h#INQL;RH1i2qhgKkVN5f,:36$Ug^.^U2-B'0W`i0E@!2_X_,BL$7"Turf
+eM.L/L;l3u5[5.]0^JoV1T^JT0oZ9SdP8P2!2MB1%)?$b1aAYY&]9W=&r/ej>7q_W
+!LRVS_alk^a9a)1kVBbDdm?,"+d9GN`FG:X0TK!Q5U"+jT\qg>is=l;NR>r?(C!0m
+EnX&s83eDrJl;;"+TVT2KH]>g5V7nmGfYVm"%Y\3dHc$li@r@-#_U7L
+TKkD9#^C$<"kBOj>D*ag'Z4"2+Ceg3To'Lf=8$3H?.ZMrIqRK:25b!=2UH4!=r(DV
+MV!pnO',^'c"1os[p>D8N)W$m\
+lW4"cVefP]Gt@7i[``.H)a6p"X]p8!\D1,&]=DdIOCT>H$[rPcDjj/'_!.60K>-=`
+cuX9VH7Oon?^j7r[-?A;^V)XsK8X#Er8/p1F$..7Xf&Z]MpcQEk>B+N*FA?bg"V$i
+f^=0Zl__E[VdItu<14;Vp>&Mcg[O&0^Q*lTq?2Q69A;WA.s8`:gTaB2T:Y0O^1=pO
+]q7n6B+fB0Qfk'6J:YbrB)gdM^),8ErV[O3)hrd-l1]Z*AfM]g4koN;)I%Z4n,`8K
+8+c9n1]BiQou=6"(@[uE#?YBM`&?/#Kq1%1K+'4;\;(Kri2]a8NRe<7@c7G54;*Y#k&]RrGfm#2%O)0T6;[E-T#W7!@I@c']cF[*!moBR'337gg
+%*5MmJ]A@H7qf!q,/FO_Xo0V9f;d]\E$tmk)E4g.h@<-j_Ig);8?GF.*b9qE=GIYa
+Z0"=p)&W^AJE<:;6=c#7&UA%F3CC,b.W)"so8`9])9V(!oHgTEO7,8/TAgoch].M1
+&iEe+`qmOXbf5rH?0DB9NtFoFb)au"?9I@@6<"iBPgQ_'8s4*BeV_A,;GM4%Bpgtk
+gfden:$'W%ZRCXP0KJ7E`Ih)-cXn/s:\0<`cBXj/fN`isdHQKR-7oY#S5P0e?%BPE
+)FWR!8W#>jjFGV;*#IYL+TU&_.'OU)_#S@QKI#HR)+cFu_YGl"TmkFt%RJ\p246)A
+IaQ8V6>;?;V<^s4"WBj:c2hl4MB-+A"UpH\cnVK[ATH5Zoh0#L:T:`jS1DGnR:_>/
+C<@T&'LpDM.O@ZcFXEg*>0/BA[9%Zh*/_,_ZS!.jDc#mG1=[6/<\$O@WTkP..+6X/
+Sq8Y5<=Zi'E,/T\eT-/!C:2^&cXU$f5W4!`&qe`3l'9Hj"t9+B=!cPcWRk/D,d'jZW17gfFfsgMuCA*O=agf1UPl?8>3]3.mjoNL
+B>,,SX4#p?>?qE/BcuQdhODi<%Q(T/(R!sO>QP?N8R&jp9*JE:pO?FJPeAtQaIeg7
+oF@r!"qr?m\u[huE+*"fc79"S]FNGI@9ZRUWsq5I=bj8)[4?E!&C[]lkjZ4K\aJ2\
+7-A2><4ffa(20i1*-$l+P:];UmG-[C;XSA?2KtQYjQ$.mOVb`)Fq\@.fB6e(mJFH5
+f7ho;lWj1hl`e9p<93"r?&D>\(g';o/k9aBL,1"Rc-F3Ak'E;a1"p3V(J+IijF(B2
+ef;uS_a$8aifCDmO0B@'6I[])H#tYhGmEXMk`$%IoIW,R#O>5o8q'.EP+4a?>"ZF/
+\"Mj1q5/uEr)::Bkj.)lf+S)#\hf8;p%AF<>#@ZP//)0Kp'fUcb?JdGAX>71Z\DSZ
+YV.Qj@eHV'XcGdm.!u*Rh,9'^3o^sXb5$cN2-Sd^&j0TY!q7bDakK\_6+YfN]j2]R
+g^!"eXo7jp0P,i4G1RGs7L^?6XdJ&<:B>=VId5_)=5:uoGn:iCLF161qrVN4u?EMh&k*YRc-.1I$D;9Ll
+NLDbFeT_$8Nqs!"n(bKLpV%BCHf#q:]_!._rFueMroWkp1WoPI)oVW7'6tFmdeKCF
+%H[E2k2*4+K7@et4kC+MP@a^-9t>m?15"B+Qe!7NEV!Q#\\bZlDC_W8@/d>fq*(/"
+n>19]_7c63s'DDspZK`68)A?;A*&0R1Q"A?$Jk_DM814Znbp3Ernl3&B01qB%CEX+
+R"5Qmq1hq:YKZge^A/<3rF`_C/5lO;c@H;+pC;$@&12nlFTTeRDi'qc..EWY3-8\W
+=c]M4%]]@Z8HWSiJIf\:YR^k5>(?"a"pb/3mLTU,$uZX-#>(BF+E7GKcNktgb-68C
+N/0KnRgCaj\L^Ybl(&&1!t$_D!U61LcjF]q%gm9Y#;@Y96h7o^UjlcmRP:]0nB.0#
+_G&<7b_f/FN6_qdW-(qM#cC0&Yi*Q@jF=(XV8rHCOAbreq[bIA")b.MYJf_1klaio
+$nod9:r)9B*\2R_15pJ^IfrVK,VR=hLbA2;q'UkJ(\3egadX`X0*cNj
+=7Vt-n/j0\Po'"j'iH8%P.;e+V^t^n.Zu//o"pSb*X;2RJ24!S;XH*"?S-<[!j]Bo
+'$,XQ!@VY6/on:A]J0liN+P(OaV"YqkA)m^D:'!1B9M$%>;306O^YXDc\
+2_-[71kh(s:s?e_2])'H9X=gXn2C-aB)GH=L2r82ZAg\8$mP9'r3L,tP,,T99fOqO
+2Nm/,F+.#.=YN_W2cG,W:`h\QbrA7,3cfF)F'C]S$TSj]$?Krll7S4EDpY!TWdR#s&u[&Q/atl:bl2Nr
+R6_s85Ek+ke)s4q=q[FsW#52=PlTiJ^X+p=^1`3PkKg=-V5dV6@VkuGT_C]=)
+V/%/AX,TKnLo]WW7UjlHoJ44_!5Qb^Oll;fV`(fE@'PEmV!B*nYtDJQTYfi][VRS)
+9/.E%5*=uk7r^AmP^L9MR)M^!RO23Te@`BBS@O\:8kX"mCu2eeTH\0%^a^`jC]]43
+O%F,h94GKBBsC=s`0iAm\na#ICZS@XR(/CA\"^lfCp,dm[6f6VI5RV\N"np6YG!3&KF(o\@sGGg>K')GM'daaVM_,udP_9l'K!
+O5MO'F=l$HR3LG(eU5U6>djhu5?3NT2."JMPYu=$H52ge'qERn51GO2=*93uED"9(
+o[^%Wq9H?"[QO%=_UL36QH\?Bqt>D#U>FJBpi2
+GDU"V(#ZoneQseqQOX;JWICqhJMISVdE/VnQ2INGq-lj=W:U$mMSik;B$k@*dP`/j
+m2odQ&=4-<=c2?k@qM&:2e=SddFLA1b+d<6m*I9bh5ssk/^n,d`G3geKc_/.Tnt^;u;aCi*a!XENMlVVV\YcHYBdm:C/eE4`R>]GC(H
+%c#q+hW7FJDp)&&QM#8+cL4fdQEM")DBS%6Wo#`Rh^`,Rf+DpO#f/,i`pQiC:!I_g
+&E@XHj\-t=/<&EpKZ6b[FNsW(%U-eg]!lcCiZ^6KkMHktK[mX[L6sQ[DJX@d=$B%YE-S][(f$HhXLb
+f>2lo]^3fAI>h4mCr#;;/F6I!8'I`4-2>jTK%Nd/hq.ej\+=p4eUL9Ro*os7:?:*R
+hUf68fM8lofIkec.6p:#eP>nDmek-:7dk/Do^,'>9gmHJs+6:)bDtT9%ktn['"2ZT
+nuSU?$o*H0JVQtHo*uIVmk4!g.alNYL6#mAhcP8*/HMqbirGJm(3D5YU0pF:;f!Pq[Vrm%,2jI/krqU1%
+fhmonAG5E*.N$b1413c!]VW/CY1EcqUV39ru]S(E45)
+bQFBsD&QR')C^.fqW
+[P?74dDVqZPgY]=YqgSFgSYqDPq_juNu:+]teJ^LA]t3Yh>Ka_X')iLRH4]DJC]b0PUV\Ns_5Qn>gHV"-jA'TFE/;'kIV
+_FQ&k4B]Uae%:c-`lKc;r(Ah>ZDDS.RP\9%]H+IDQc2(&b+*fQ-*P57AL5!0@AM-`
+P(55*RERCJ?!>E"]H&?!Fiii5_:rZNeN_-:;\G6\c@"0YdLBK]1FBBe'2jt1dXIT0
+cQ_Y0cuNnO6p2eY&;mD=.%>?),AFVbdDkQPR0LOWPd')S5qum%2Ihc62
+.ssM.V$o_se^BUC='\JK*DadJbf!CG38Gj7I!ULZHd`rZdiCfqP3pXF0A%cO(o01.
+dK%\b.5^sASC8Y+SX%It,1OMeaV@!mh5mq[MqM?0g&+c+)rcU!@2X,0]7"\8ba=*2
+k:fgMC0er9WL]Y*]:X=N==G1LKV]%W-)X*
+C5:I5:J^RJ=jp>AcqM_@LBQ.#gN