changeset 178:4ea4badb2266 danieleb

added ramirez dl (to be completed) and MOCOD dictionary update
author Daniele Barchiesi <daniele.barchiesi@eecs.qmul.ac.uk>
date Thu, 17 Nov 2011 11:22:17 +0000
parents 714fa7b8c1ad (diff) 775caafd5651 (current diff)
children 989b7d78e1c8
files .hgtags util/SMALL_solve.m
diffstat 28 files changed, 1739 insertions(+), 119 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.hgignore	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,20 @@
+toolboxes/CVX
+toolboxes/GPSR
+toolboxes/KSVD
+toolboxes/KSVDS
+toolboxes/SPARCO
+toolboxes/SparseLab
+toolboxes/Sparsify
+toolboxes/SPGL1
+solvers/SMALL_ompGabor/omp2mexGabor\.mexmaci64
+solvers/SMALL_ompGabor/ompmexGabor\.mexmaci64
+util/ksvd utils/addtocols\.mexmaci64
+util/ksvd utils/col2imstep\.mexmaci64
+util/ksvd utils/collincomb\.mexmaci64
+util/ksvd utils/im2colstep\.mexmaci64
+util/ksvd utils/rowlincomb\.mexmaci64
+util/ksvd utils/sprow\.mexmaci64
+util/Rice Wavelet Toolbox/mdwt\.mexmaci64
+util/Rice Wavelet Toolbox/midwt\.mexmaci64
+util/Rice Wavelet Toolbox/mirdwt\.mexmaci64
+util/Rice Wavelet Toolbox/mrdwt\.mexmaci64
--- a/.hgtags	Wed Sep 07 14:17:55 2011 +0100
+++ b/.hgtags	Thu Nov 17 11:22:17 2011 +0000
@@ -3,5 +3,6 @@
 9ff69e8e049f936804d0e5876cd4d367be9f3c4a backup 14032011
 b14e1f6ee4bea90a8a894a52c1114a72aa818071 ver_1.1
 19e0af5709140e163faaf9d8cf4b83a664be1edc release_1.5
-30872eb3d1606b89f8d071cfdb7cddb874fab8fd release_1.51
-4205744092e6a16951e48acef27fb41e32851758 release_1.9
+a4d0977d45956b96579a3ed83a4e6a1869ee6055 danieleb
+a4d0977d45956b96579a3ed83a4e6a1869ee6055 danieleb
+0000000000000000000000000000000000000000 danieleb
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DL/dl_ramirez.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,189 @@
+function DL = dl_ramirez(Problem,DL)
+%% Dictionary learning with incoherent dictionary
+%
+% REFERENCE
+% I. Ramirez, F. Lecumberry and G. Sapiro, Sparse modeling with universal
+% priors and learned incoherent dictionaries.
+
+%%
+%   Centre for Digital Music, Queen Mary, University of London.
+%   This file copyright 2011 Daniele Barchiesi.
+%
+%   This program is free software; you can redistribute it and/or
+%   modify it under the terms of the GNU General Public License as
+%   published by the Free Software Foundation; either version 2 of the
+%   License, or (at your option) any later version.  See the file
+%   COPYING included with this distribution for more information.
+
+%% Test function
+if ~nargin, testdl_ramirez; return; end
+
+%% Parameters & Defaults
+X = Problem.b;					%matrix of observed signals
+
+% determine dictionary size %
+if (isfield(DL.param,'initdict')) %if the dictionary has been initialised
+	if (any(size(DL.param.initdict)==1) && all(iswhole(DL.param.initdict(:))))
+		dictSize = length(DL.param.initdict);
+	else
+		dictSize = size(DL.param.initdict,2);
+	end
+end
+if (isfield(DL.param,'dictsize'))
+	dictSize = DL.param.dictsize;
+end
+
+if (size(X,2) < dictSize)
+	error('Number of training signals is smaller than number of atoms to train');
+end
+
+
+% initialize the dictionary %
+if (isfield(DL.param,'initdict')) && ~isempty(DL.param.initdict);
+	if (any(size(DL.param.initdict)==1) && all(iswhole(DL.param.initdict(:))))
+		D = X(:,DL.param.initdict(1:dictSize));
+	else
+		if (size(DL.param.initdict,1)~=size(X,1) || size(DL.param.initdict,2)<dictSize)
+			error('Invalid initial dictionary');
+		end
+		D = DL.param.initdict(:,1:dictSize);
+	end
+else
+	data_ids = find(colnorms_squared(X) > 1e-6);   % ensure no zero data elements are chosen
+	perm = randperm(length(data_ids));
+	D = X(:,data_ids(perm(1:dictSize)));
+end
+
+
+% coherence penalty factor
+if isfield(DL.param,'zeta')
+	zeta = DL.param.zeta;
+else
+	zeta = 0.1; 
+end
+
+% atoms norm penalty factor
+if isfield(DL.param,'eta')
+	eta = DL.param.eta;
+else
+	eta = 0.1; 
+end
+
+% number of iterations (default is 40) %
+if isfield(DL.param,'iternum')
+    iternum = DL.param.iternum;
+else
+    iternum = 40;
+end
+
+% show dictionary every specified number of iterations
+if isfield(DL.param,'show_dict')
+    show_dictionary=1;
+    show_iter=DL.param.show_dict;
+else
+    show_dictionary=0;
+    show_iter=0;
+end
+
+tmpTraining = Problem.b1;
+Problem.b1 = X;
+if isfield(Problem,'reconstruct')
+    Problem = rmfield(Problem, 'reconstruct');
+end
+
+
+%% Main Algorithm
+Dprev = D;						%initial dictionary
+Aprev = D\X;				    %initialise the solution with the pseudo-inverse of D
+for i = 1:iternum
+	%Sparse coding step (reweighted l1 with mixture-of-Laplacians prior)
+	A = sparsecoding(X,D,Aprev);
+	%Dictionary Update
+	D = dictionaryupdate(X,A,Dprev,zeta,eta);
+	
+	Dprev = D;
+	Aprev = A;
+   if ((show_dictionary)&&(mod(i,show_iter)==0))
+       dictimg = SMALL_showdict(D,[8 8],...
+            round(sqrt(size(D,2))),round(sqrt(size(D,2))),'lines','highcontrast');  
+       figure(2); imagesc(dictimg);colormap(gray);axis off; axis image;
+       pause(0.02);
+   end
+end
+
+Problem.b1 = tmpTraining;
+DL.D = D;
+
+end
+
+function A = sparsecoding(X,D,Aprev)
+%Sparse coding using a mixture of Laplacians (MOL) as a universal prior.
+
+%parameters
+K = size(D,2);	%number of atoms
+M = size(X,2);	%number of signals
+
+mu1 = mean(abs(Aprev(:)));			%first moment of distribution of Aprev
+mu2 = (norm(Aprev(:))^2)/numel(Aprev);%second moment of distribution of Aprev
+kappa = 2*(mu2-mu1^2)/(mu2-2*mu2^2); %parameter kappa of the MOL distribution
+beta = (kappa-1)*mu1;				%parameter beta of the MOL distribution
+
+E = X-D*Aprev;						%error term
+sigmasq = mean(var(E));				%error variance
+tau = 2*sigmasq*(kappa+1);			%sparsity factor
+
+%solve a succession of subproblems to approximate the non-convex cost
+%function
+nIter = 10;							%number of iterations of surrogate subproblem
+Psi   = zeros(K,M);					%initialise solution of subproblem
+for iIter=1:nIter
+	Reg = 1./(abs(Psi) + beta);
+	Psi = solvel1(X,D,tau,Reg);
+end
+A = Psi;
+end
+
+function Psi = solvel1(X,D,tau,A)
+	[K M] = size(A);
+	Psi = zeros(K,M);
+	for m=1:M
+		cvx_begin quiet
+			variable v(K)
+			minimise (norm(X(:,m)-D*v) + tau*norm(A(:,m).*v,1));
+		cvx_end
+		Psi(:,m) = v;
+	end
+end
+
+function D = dictionaryupdate(X,A,Dprev,zeta,eta)
+	D = (X*A' + 2*(zeta + eta)*Dprev)/(A*A' + 2*zeta*(Dprev'*Dprev) + 2*eta*diag(diag(Dprev'*Dprev)));
+end
+
+
+
+function Y = colnorms_squared(X)
+% compute in blocks to conserve memory
+Y = zeros(1,size(X,2));
+blocksize = 2000;
+for i = 1:blocksize:size(X,2)
+	blockids = i : min(i+blocksize-1,size(X,2));
+	Y(blockids) = sum(X(:,blockids).^2);
+end
+end
+
+function testdl_ramirez
+	clc
+	N = 10;		%ambient dimension
+	K = 20;		%number of atoms
+	M = 30;		%number of observed signals
+	X = randn(N,M);		%observed signals
+	D = normcol(randn(N,K)); %initial dictionary
+	Problem.b = X;		%sparse representation problem
+	Problem.b1 = X;
+	DL = SMALL_init_DL('dl_ramirez');
+	DL.param.initdict = D;
+	DL.param = struct('initdict',D,...
+					  'zeta',0.5,...
+					  'eta',0.5);
+	DL = SMALL_learn(Problem,DL);
+end
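
A minimal sketch (not part of the changeset) of the closed-form update implemented in dictionaryupdate above; the sizes are arbitrary and, with zeta = eta = 0, the expression reduces to the plain MOD update X*A'/(A*A').

N = 16; K = 32; M = 100;                     %signal length, number of atoms, training signals
X = randn(N,M);                              %training signals
A = randn(K,M).*(rand(K,M)>0.8);             %sparse coefficient matrix
Dprev = randn(N,K);
Dprev = Dprev*diag(1./sqrt(sum(Dprev.^2)));  %unit-norm previous dictionary
zeta = 0.1; eta = 0.1;                       %coherence and atom-norm penalty factors
D = (X*A' + 2*(zeta+eta)*Dprev)/...
    (A*A' + 2*zeta*(Dprev'*Dprev) + 2*eta*diag(diag(Dprev'*Dprev)));
Dmod = (X*A')/(A*A');                        %MOD update, recovered when zeta = eta = 0
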
--- a/DL/two-step DL/SMALL_two_step_DL.m	Wed Sep 07 14:17:55 2011 +0100
+++ b/DL/two-step DL/SMALL_two_step_DL.m	Thu Nov 17 11:22:17 2011 +0000
@@ -4,7 +4,7 @@
 
 solver = DL.param.solver;
 
-% determine which type of udate to use ('KSVD', 'MOD', 'ols' or 'mailhe') %
+% determine which type of update to use ('KSVD', 'MOD', 'MOCOD', 'ols' or 'mailhe') %
 
 typeUpdate = DL.name;
 
@@ -30,7 +30,7 @@
 
 % initialize the dictionary %
 
-if (isfield(DL.param,'initdict'))
+if (isfield(DL.param,'initdict')) && ~isempty(DL.param.initdict);
   if (any(size(DL.param.initdict)==1) && all(iswhole(DL.param.initdict(:))))
     dico = sig(:,DL.param.initdict(1:dictsize));
   else
@@ -76,13 +76,15 @@
 end
 % determine if we should do decorrelation in every iteration  %
 
-if isfield(DL.param,'coherence')
+if isfield(DL.param,'coherence') && isscalar(DL.param.coherence)
     decorrelate = 1;
     mu = DL.param.coherence;
 else
     decorrelate = 0;
 end
 
+if ~isfield(DL.param,'decFcn'), DL.param.decFcn = 'none'; end
+
 % show dictonary every specified number of iterations
 
 if isfield(DL.param,'show_dict')
@@ -108,13 +110,34 @@
 % main loop %
 
 for i = 1:iternum
-    Problem.A = dico;
+    %disp([num2str(i) '/' num2str(iternum)]);
+    %SPARSE CODING STEP
+	Problem.A = dico;
     solver = SMALL_solve(Problem, solver);
-    [dico, solver.solution] = dico_update(dico, sig, solver.solution, ...
-        typeUpdate, flow, learningRate);
-    if (decorrelate)
-        dico = dico_decorr(dico, mu, solver.solution);
-    end
+	%DICTIONARY UPDATE STEP
+	if strcmpi(typeUpdate,'mocod')			%if update is MOCOD create parameters structure
+		mocodParams = struct('zeta',DL.param.zeta,...	%coherence regularization factor
+							 'eta',DL.param.eta,...		%atoms norm regularization factor
+							 'Dprev',dico);				%previous dictionary
+		dico = dico_update(dico,sig,solver.solution,typeUpdate,flow,learningRate,mocodParams);
+	else
+		[dico, solver.solution] = dico_update(dico, sig, solver.solution, ...
+			typeUpdate, flow, learningRate);
+		dico = normcols(dico);
+	end
+    
+        switch lower(DL.param.decFcn)
+            case 'ink-svd'
+                dico = dico_decorr_symetric(dico,mu,solver.solution);
+            case 'grassmannian'
+                [n m] = size(dico);
+                dico = grassmannian(n,m,[],0.9,0.99,dico);
+			case 'shrinkgram'
+				dico = shrinkgram(dico,mu);
+			case 'iterproj'
+				dico = iterativeprojections(dico,mu,Problem.b1,solver.solution);
+            otherwise
+        end
     
    if ((show_dictionary)&&(mod(i,show_iter)==0))
        dictimg = SMALL_showdict(dico,[8 8],...
@@ -139,4 +162,4 @@
   Y(blockids) = sum(X(:,blockids).^2);
 end
 
-end
\ No newline at end of file
+end
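
A usage sketch of the new 'mocod' branch; the parameter fields mirror those set in examples/SMALL_test_mocod.m further down this changeset, the training matrix is a random stand-in, and the SMALL toolbox is assumed to be on the path.

X = randn(256,2048);                               %stand-in training signals
ompParam = struct('X',X,'epsilon',1e-6,'maxatoms',12);
solver = SMALL_init_solver('ompbox','omp2',ompParam,false);
DL = SMALL_init_DL('TwoStepDL','mocod');           %two-step DL with the MOCOD update
DL.param = struct('data',X,'Tdata',12,'dictsize',512,...
    'iternum',20,'memusage','high','solver',solver,...
    'zeta',0.1,'eta',0.1);                         %MOCOD regularisation factors
Problem = struct('b',X,'b1',X);                    %training and test signals
DL = SMALL_learn(Problem,DL);                      %learned dictionary returned in DL.D
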
--- a/DL/two-step DL/dico_decorr.m	Wed Sep 07 14:17:55 2011 +0100
+++ b/DL/two-step DL/dico_decorr.m	Thu Nov 17 11:22:17 2011 +0000
@@ -9,6 +9,8 @@
     %   Result:
     %   dico: a dictionary close to the input one with coherence mu.
     
+    eps = 1e-6; % define tolerance for normalisation term alpha
+    
     % compute atom weights
     if nargin > 2
         rank = sum(amp.*amp, 2);
@@ -20,7 +22,7 @@
     % coherence mu. niter can be adjusted to needs.
     niter = 1;
     while niter < 5 && ...
-            max(max(abs(dico'*dico -eye(length(dico))))) > mu + 10^-6
+            max(max(abs(dico'*dico -eye(length(dico))))) > mu + eps
         % find pairs of high correlation atoms
         colors = dico_color(dico, mu);
         
@@ -36,7 +38,7 @@
                 
                 % update the atom
                 corr = dico(:,index(1))'*dico(:,index(2));
-                alpha = sqrt((1-mu*mu)/(1-corr*corr));
+                alpha = sqrt((1-mu*mu)/(1-corr^2+eps));
                 beta = corr*alpha-mu*sign(corr);
                 dico(:,index(2)) = alpha*dico(:,index(2))...
                     -beta*dico(:,index(1));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/DL/two-step DL/dico_decorr_symetric.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,61 @@
+function dico = dico_decorr_symetric(dico, mu, amp)
+    %DICO_DECORR_SYMETRIC decorrelate a dictionary
+    %   Parameters:
+    %   dico: the dictionary
+    %   mu: the coherence threshold
+    %   amp: the amplitude coefficients, only used to decide which atom to
+    %   project
+    %
+    %   Result:
+    %   dico: a dictionary close to the input one with coherence mu.
+    
+    eps = 1e-6; % define tolerance for normalisation term alpha
+    
+    % convert the target coherence mu into the half-angle between each pair of atoms and their mean direction
+    theta = acos(mu)/2;
+    ctheta = cos(theta);
+    stheta = sin(theta);
+    
+    % compute atom weights
+    %     if nargin > 2
+    %         rank = sum(amp.*amp, 2);
+    %     else
+    %         rank = randperm(length(dico));
+    %     end
+    
+    % several decorrelation iterations might be needed to reach global
+    % coherence mu. niter can be adjusted to needs.
+    niter = 1;
+    while max(max(abs(dico'*dico -eye(length(dico))))) > mu + 0.01
+        % find pairs of high correlation atoms
+        colors = dico_color(dico, mu);
+        
+        % iterate on all pairs
+        nbColors = max(colors);
+        for c = 1:nbColors
+            index = find(colors==c);
+            if numel(index) == 2
+                if dico(:,index(1))'*dico(:,index(2)) > 0               
+                    %build the basis vectors
+                    v1 = dico(:,index(1))+dico(:,index(2));
+                    v1 = v1/norm(v1);
+                    v2 = dico(:,index(1))-dico(:,index(2));
+                    v2 = v2/norm(v2);
+                    
+                    dico(:,index(1)) = ctheta*v1+stheta*v2;
+                    dico(:,index(2)) = ctheta*v1-stheta*v2;
+                else
+                    v1 = dico(:,index(1))-dico(:,index(2));
+                    v1 = v1/norm(v1);
+                    v2 = dico(:,index(1))+dico(:,index(2));
+                    v2 = v2/norm(v2);
+                    
+                    dico(:,index(1)) = ctheta*v1+stheta*v2;
+                    dico(:,index(2)) = -ctheta*v1+stheta*v2;
+                end
+            end
+        end
+        niter = niter+1;
+    end
+end
+
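
A self-contained sketch of the pairwise rotation performed above: the two atoms of a correlated pair are rotated symmetrically about their mean direction by the half-angle theta = acos(mu)/2, so that their inner product becomes exactly mu = cos(2*theta).

d1 = randn(64,1); d1 = d1/norm(d1);                %first unit-norm atom
d2 = 0.9*d1 + 0.1*randn(64,1); d2 = d2/norm(d2);   %strongly correlated second atom
mu = 0.5; theta = acos(mu)/2;                      %target coherence and half-angle
v1 = (d1+d2)/norm(d1+d2);                          %mean direction
v2 = (d1-d2)/norm(d1-d2);                          %difference direction
d1new = cos(theta)*v1 + sin(theta)*v2;
d2new = cos(theta)*v1 - sin(theta)*v2;
d1new'*d2new                                       %cos(theta)^2 - sin(theta)^2 = mu
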
--- a/DL/two-step DL/dico_update.m	Wed Sep 07 14:17:55 2011 +0100
+++ b/DL/two-step DL/dico_update.m	Thu Nov 17 11:22:17 2011 +0000
@@ -1,107 +1,120 @@
-function [dico, amp] = dico_update(dico, sig, amp, type, flow, rho)
+function [dico, amp] = dico_update(dico, sig, amp, type, flow, rho,mocodParams)
 
-    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-    % [dico, amp] = dico_update(dico, sig, amp, type, flow, rho)
-    %
-    % perform one iteration of dictionary update for dictionary learning
-    %
-    % parameters:
-    % - dico: the initial dictionary with atoms as columns
-    % - sig: the training data
-    % - amp: the amplitude coefficients as a sparse matrix
-    % - type: the algorithm can be one of the following
-    %   - ols: fixed step gradient descent
-    %   - mailhe: optimal step gradient descent (can be implemented as a
-    %   default for ols?)
-    %   - MOD: pseudo-inverse of the coefficients
-    %   - KSVD: already implemented by Elad
-    % - flow: 'sequential' or 'parallel'. If sequential, the residual is
-    % updated after each atom update. If parallel, the residual is only
-    % updated once the whole dictionary has been computed. Sequential works
-    % better, there may be no need to implement parallel. Not used with
-    % MOD.
-    % - rho: learning rate. If the type is 'ols', it is the descent step of
-    % the gradient (typical choice: 0.1). If the type is 'mailhe', the 
-    % descent step is the optimal step*rho (typical choice: 1, although 2
-    % or 3 seems to work better). Not used for MOD and KSVD.
-    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-    if ~ exist( 'rho', 'var' ) || isempty(rho)
-        rho = 0.1;
-    end
-    
-    if ~ exist( 'flow', 'var' ) || isempty(flow)
-        flow = sequential;
-    end
-    
-    res = sig - dico*amp;
-    nb_pattern = size(dico, 2);
-    
-    switch type
-        case 'rand'
-            x = rand();
-            if x < 1/3
-                type = 'MOD';
-            elseif type < 2/3
-                type = 'mailhe';
-            else
-                type = 'KSVD';
-            end
-    end
-    
-    switch type
-        case 'MOD'
-            G = amp*amp';
-            dico2 = sig*amp'*G^-1;
-            for p = 1:nb_pattern
-                n = norm(dico2(:,p));
-                % renormalize
-                if n > 0
-                    dico(:,p) = dico2(:,p)/n;
-                    amp(p,:) = amp(p,:)*n;
-                end
-            end
-        case 'ols'
-            for p = 1:nb_pattern
-                grad = res*amp(p,:)';
-                if norm(grad) > 0
-                    pat = dico(:,p) + rho*grad;
-                    pat = pat/norm(pat);
-                    if nargin >5 && strcmp(flow, 'sequential')
-                        res = res + (dico(:,p)-pat)*amp(p,:); %#ok<*NASGU>
-                    end
-                    dico(:,p) = pat;
-                end
-            end
-        case 'mailhe'
-            for p = 1:nb_pattern
-                grad = res*amp(p,:)';
-                if norm(grad) > 0
-                    pat = (amp(p,:)*amp(p,:)')*dico(:,p) + rho*grad;
-                    pat = pat/norm(pat);
-                    if nargin >5 && strcmp(flow, 'sequential')
-                        res = res + (dico(:,p)-pat)*amp(p,:);
-                    end
-                    dico(:,p) = pat;
-                end
-            end
-        case 'KSVD'
-            for p = 1:nb_pattern
-                index = find(amp(p,:)~=0);
-                if ~isempty(index)
-                    patch = res(:,index)+dico(:,p)*amp(p,index);
-                    [U,S,V] = svd(patch);
-                    if U(:,1)'*dico(:,p) > 0
-                        dico(:,p) = U(:,1);
-                    else
-                        dico(:,p) = -U(:,1);
-                    end
-                    dico(:,p) = dico(:,p)/norm(dico(:,p));
-                    amp(p,index) = dico(:,p)'*patch;
-                    if nargin >5 && strcmp(flow, 'sequential')
-                        res(:,index) = patch-dico(:,p)*amp(p,index);
-                    end
-                end
-            end
-    end
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% [dico, amp] = dico_update(dico, sig, amp, type, flow, rho)
+%
+% perform one iteration of dictionary update for dictionary learning
+%
+% parameters:
+% - dico: the initial dictionary with atoms as columns
+% - sig: the training data
+% - amp: the amplitude coefficients as a sparse matrix
+% - type: the algorithm can be one of the following
+%   - ols: fixed step gradient descent
+%   - mailhe: optimal step gradient descent (can be implemented as a
+%   default for ols?)
+%   - MOD: pseudo-inverse of the coefficients
+%   - KSVD: already implemented by Elad
+% - flow: 'sequential' or 'parallel'. If sequential, the residual is
+% updated after each atom update. If parallel, the residual is only
+% updated once the whole dictionary has been computed. Sequential works
+% better, there may be no need to implement parallel. Not used with
+% MOD.
+% - rho: learning rate. If the type is 'ols', it is the descent step of
+% the gradient (typical choice: 0.1). If the type is 'mailhe', the
+% descent step is the optimal step*rho (typical choice: 1, although 2
+% or 3 seems to work better). Not used for MOD and KSVD.
+% - mocodParams: struct containing the parameters for the MOCOD dictionary
+% update (see Ramirez et al., Sparse modeling with universal priors and
+% learned incoherent dictionaries). The required fields are
+%	.Dprev: dictionary at the previous optimisation step
+%	.zeta: coherence regularisation factor
+%	.eta: atom norm regularisation factor
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+if ~ exist( 'rho', 'var' ) || isempty(rho)
+	rho = 0.1;
 end
 
+if ~ exist( 'flow', 'var' ) || isempty(flow)
+	flow = 'sequential';
+end
+
+res = sig - dico*amp;
+nb_pattern = size(dico, 2);
+
+switch type
+	case 'rand'
+		x = rand();
+		if x < 1/3
+			type = 'MOD';
+		elseif x < 2/3
+			type = 'mailhe';
+		else
+			type = 'KSVD';
+		end
+end
+
+switch upper(type)
+	case 'MOD'
+		G = amp*amp';
+		dico2 = sig*amp'*G^-1;
+		for p = 1:nb_pattern
+			n = norm(dico2(:,p));
+			% renormalize
+			if n > 0
+				dico(:,p) = dico2(:,p)/n;
+				amp(p,:) = amp(p,:)*n;
+			end
+		end
+	case 'OLS'
+		for p = 1:nb_pattern
+			grad = res*amp(p,:)';
+			if norm(grad) > 0
+				pat = dico(:,p) + rho*grad;
+				pat = pat/norm(pat);
+				if nargin >5 && strcmp(flow, 'sequential')
+					res = res + (dico(:,p)-pat)*amp(p,:); %#ok<*NASGU>
+				end
+				dico(:,p) = pat;
+			end
+		end
+	case 'MAILHE'
+		for p = 1:nb_pattern
+			grad = res*amp(p,:)';
+			if norm(grad) > 0
+				pat = (amp(p,:)*amp(p,:)')*dico(:,p) + rho*grad;
+				pat = pat/norm(pat);
+				if nargin >5 && strcmp(flow, 'sequential')
+					res = res + (dico(:,p)-pat)*amp(p,:);
+				end
+				dico(:,p) = pat;
+			end
+		end
+	case 'KSVD'
+		for p = 1:nb_pattern
+			index = find(amp(p,:)~=0);
+			if ~isempty(index)
+				patch = res(:,index)+dico(:,p)*amp(p,index);
+				[U,~,V] = svd(patch);
+				if U(:,1)'*dico(:,p) > 0
+					dico(:,p) = U(:,1);
+				else
+					dico(:,p) = -U(:,1);
+				end
+				dico(:,p) = dico(:,p)/norm(dico(:,p));
+				amp(p,index) = dico(:,p)'*patch;
+				if nargin >5 && strcmp(flow, 'sequential')
+					res(:,index) = patch-dico(:,p)*amp(p,index);
+				end
+			end
+		end
+	case 'MOCOD'
+		zeta = mocodParams.zeta;
+		eta = mocodParams.eta;
+		Dprev = mocodParams.Dprev;
+		
+		dico = (sig*amp' + 2*(zeta+eta)*Dprev)/...
+			(amp*amp' + 2*zeta*(Dprev'*Dprev) + 2*eta*diag(diag(Dprev'*Dprev)));
+end
+end
+
Binary file data/audio/wav/oboe.mf.c4b4.wav has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/examples/SMALL_test_coherence.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,208 @@
+clear
+
+%% Parameters
+
+% Dictionary learning parameters
+toolbox   = 'TwoStepDL'; %dictionary learning toolbox
+dicUpdate = {'ksvd','mailhe'}; %dictionary updates
+iterNum   = 20; %number of iterations
+
+% Test signal parameters
+signal    = audio('music03_16kHz.wav'); %audio signal
+blockSize = 256; %size of audio frames
+dictSize  = 512; %number of atoms in the dictionary
+overlap   = 0.5; %overlap between consecutive frames
+sigma     = 1e6; %snr of noise (set to be negligible so that the problem becomes approximation rather than denoising)
+percActiveAtoms = 5; %percentage of active atoms
+
+% Dependent parameters
+nActiveAtoms = fix(blockSize/100*percActiveAtoms); %number of active atoms
+epsilon      = 1/sigma; %error constraint for sparse representation step (corresponds to noise applied to signals)
+minMu = sqrt((dictSize-blockSize)/(blockSize*(dictSize-1))); %target coherence (based on coherence lower bound)
+minCoherence = [0.1:0.1:1];
+
+% Initial dictionaries
+dctDict = dictionary('dct',blockSize,dictSize);
+dctDict = dctDict.phi;
+gaborParam = struct('N',blockSize,'redundancyFactor',2,'wd',@rectwin);
+gaborDict = Gabor_Dictionary(gaborParam);
+
+%% Generate audio denoising problem with low noise (audio representation)
+SMALL.Problem = generateAudioDenoiseProblem(signal.s,[],blockSize,...
+	dictSize,overlap,sigma); % generate representation problem
+SMALL.Problem.b1 = SMALL.Problem.b; % copy signals from training set b to test set b1 (needed for later functions)
+
+% omp2 sparse representation solver
+ompParam = struct('X',SMALL.Problem.b,'epsilon',epsilon,'maxatoms',nActiveAtoms); %parameters
+solver = SMALL_init_solver('ompbox','omp2',ompParam,false); %solver structure
+
+
+%% Test
+name = dicUpdate{1}; %use ksvd update
+SMALL.DL(1:36) = SMALL_init_DL(toolbox,name); %create dictionary learning structures
+
+% learn with random initialisation and no decorrelation
+SMALL.DL(1).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+	'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+	'decFcn','none'); %parameters for the dictionary learning
+SMALL.DL(1) = SMALL_learn(SMALL.Problem,SMALL.DL(1)); %learn dictionary
+%save('SMALL','SMALL');
+
+% learn with random initialisation and mailhe decorrelation
+for iMu=1:10
+	SMALL.DL(1+iMu).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+		'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+		'decFcn','mailhe','coherence',minCoherence(iMu)); %parameters for the dictionary learning
+	SMALL.DL(1+iMu) = SMALL_learn(SMALL.Problem,SMALL.DL(1+iMu)); %learn dictionary
+	%save('SMALL','SMALL');
+end
+
+% learn with random initialisation and tropp decorrelation
+SMALL.DL(12).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+	'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+	'decFcn','tropp','coherence',minCoherence); %parameters for the dictionary learning
+SMALL.DL(12) = SMALL_learn(SMALL.Problem,SMALL.DL(12)); %learn dictionary
+
+% Learn with dct initialisation and no decorrelation
+SMALL.DL(13).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+	'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+	'decFcn','none','initdict',dctDict); %parameters for the dictionary learning
+SMALL.DL(13) = SMALL_learn(SMALL.Problem,SMALL.DL(13)); %learn dictionary
+
+% learn with dct initialisation and mailhe decorrelation
+for iMu=1:10
+	SMALL.DL(13+iMu).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+		'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+		'decFcn','mailhe','coherence',minCoherence(iMu),'initdict',dctDict); %parameters for the dictionary learning
+	SMALL.DL(13+iMu) = SMALL_learn(SMALL.Problem,SMALL.DL(13+iMu)); %learn dictionary
+	%save('SMALL','SMALL');
+end
+
+% learn with dct initialisation and tropp decorrelation
+SMALL.DL(24).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+	'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+	'decFcn','tropp','coherence',minCoherence,'initdict',dctDict); %parameters for the dictionary learning
+SMALL.DL(24) = SMALL_learn(SMALL.Problem,SMALL.DL(24)); %learn dictionary
+
+
+% Learn with gabor initialisation and no decorrelation
+SMALL.DL(25).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+	'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+	'decFcn','none','initdict',gaborDict); %parameters for the dictionary learning
+SMALL.DL(25) = SMALL_learn(SMALL.Problem,SMALL.DL(25)); %learn dictionary
+
+% learn with gabor initialisation and mailhe decorrelation
+for iMu=1:10
+	SMALL.DL(25+iMu).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+		'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+		'decFcn','mailhe','coherence',minCoherence(iMu),'initdict',gaborDict); %parameters for the dictionary learning
+	SMALL.DL(25+iMu) = SMALL_learn(SMALL.Problem,SMALL.DL(25+iMu)); %learn dictionary
+	%save('SMALL','SMALL');
+end
+
+% learn with gabor initialisation and tropp decorrelation
+SMALL.DL(36).param = struct('data',SMALL.Problem.b,'Tdata',nActiveAtoms,...
+	'dictsize',dictSize,'iternum',iterNum,'memusage','high','solver',solver,...
+	'decFcn','tropp','coherence',minCoherence,'initdict',gaborDict); %parameters for the dictionary learning
+SMALL.DL(36) = SMALL_learn(SMALL.Problem,SMALL.DL(36)); %learn dictionary
+
+%% Evaluate coherence and snr of representation for the various methods
+sigNoiseRatio = zeros(36,1);
+mu = zeros(36,1);
+for i=1:36
+	SMALL.Problem.A = SMALL.DL(i).D;
+	tempSolver = SMALL_solve(SMALL.Problem,solver);
+	sigNoiseRatio(i) = snr(SMALL.Problem.b,SMALL.DL(i).D*tempSolver.solution);
+	dic(i) = dictionary(SMALL.DL(i).D);
+	mu(i) = dic(i).coherence;
+end
+
+%% Plot results
+minMu = sqrt((dictSize-blockSize)/(blockSize*(dictSize-1)));
+
+figure,
+%subplot(3,1,1)
+hold on, grid on
+title('Data Initialisation')
+plot([1 1],[0 25],'k-');
+plot(mu(1),sigNoiseRatio(1),'ks');
+plot(mu(12),sigNoiseRatio(12),'kd');
+plot(mu(2:11),sigNoiseRatio(2:11),'k*-');
+plot([minMu minMu],[0 25],'k--')
+set(gca,'YLim',[0 25],'XLim',[0 1.4]);
+legend({'\mu_{max}','K-SVD','Grassmannian','INK-SVD','\mu_{min}'});
+xlabel('\mu');
+ylabel('SNR (dB)');
+
+figure
+%subplot(3,1,2)
+hold on, grid on
+title('DCT Initialisation')
+plot([1 1],[0 25],'k-');
+plot(mu(13),sigNoiseRatio(13),'ks');
+plot(mu(24),sigNoiseRatio(24),'kd');
+plot(mu(14:23),sigNoiseRatio(14:23),'k*-');
+plot([minMu minMu],[0 25],'k--')
+set(gca,'YLim',[0 25],'XLim',[0 1.4]);
+legend({'\mu_{max}','K-SVD','Grassmannian','INK-SVD','\mu_{min}'});
+xlabel('\mu');
+ylabel('SNR (dB)');
+
+figure
+%subplot(3,1,3)
+hold on, grid on
+title('Gabor Initialisation')
+plot([1 1],[0 25],'k-');
+plot(mu(25),sigNoiseRatio(25),'ks');
+plot(mu(36),sigNoiseRatio(36),'kd');
+plot(mu(26:35),sigNoiseRatio(26:35),'k*-');
+plot([minMu minMu],[0 25],'k--')
+set(gca,'YLim',[0 25],'XLim',[0 1.4]);
+legend({'\mu_{max}','K-SVD','Grassmannian','INK-SVD','\mu_{min}'});
+xlabel('\mu');
+ylabel('SNR (dB)');
+
+% minMu = sqrt((dictSize-blockSize)/(blockSize*(dictSize-1)));
+% maxSNR = max(sigNoiseRatio);
+%
+% figure, subplot(2,2,1)
+% snrMat = buffer(sigNoiseRatio(1:9),3);
+% bar(snrMat');
+% title('Signal to noise ratio')
+% xlabel('Initial dictionary')
+% ylabel('SNR (dB)')
+% set(gca,'XTickLabel',{'data','dct','gabor'});
+% legend('none','Mailhe','Tropp')
+% grid on
+%
+% subplot(2,2,2), grid on
+% snrMat = buffer(sigNoiseRatio(10:18),3);
+% bar(snrMat');
+% title('SNR - Mailhe Update')
+% xlabel('Initial dictionary')
+% ylabel('SNR (dB)')
+% set(gca,'XTickLabel',{'data','dct','gabor'},'YLim',[0 maxSNR+1]);
+% legend('none','Mailhe','Tropp')
+% grid on
+%
+% subplot(2,2,3), hold on, grid on
+% title('Coherence')
+% muMat = buffer(mu(1:9),3);
+% line([0.5 3.5],[1 1],'Color','r');
+% bar(muMat');
+% line([0.5 3.5],[minMu minMu],'Color','k');
+% set(gca,'XTick',1:3,'XTickLabel',{'data','dct','gabor'},'YLim',[0 1.05])
+% legend('\mu_{max}','none','Mailhe','Tropp','\mu_{min}')
+% ylabel('\mu')
+% xlabel('Initial dictionary')
+%
+% subplot(2,2,4), hold on, grid on
+% title('Coherence - Mailhe Update')
+% muMat = buffer(mu(10:18),3);
+% line([0.5 3.5],[1 1],'Color','r');
+% bar(muMat');
+% line([0.5 3.5],[minMu minMu],'Color','k');
+% set(gca,'XTick',1:3,'XTickLabel',{'data','dct','gabor'},'YLim',[0 1.05])
+% legend('\mu_{max}','none','Mailhe','Tropp','\mu_{min}')
+% ylabel('\mu')
+% xlabel('Initial dictionary')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/examples/SMALL_test_coherence2.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,117 @@
+clc, clear, close all
+
+%% Parameters
+nTrials   = 10;										%number of trials of the experiment
+
+% Dictionary learning parameters
+toolbox   = 'TwoStepDL';							%dictionary learning toolbox
+dicUpdate = 'ksvd';									%dictionary learning updates
+dicDecorr = {'iterproj','ink-svd','shrinkgram'};	%dictionary decorrelation methods
+minCoherence = linspace(0.1,1,10);					%coherence levels
+
+iterNum   = 20;				%number of iterations
+epsilon   = 1e-6;			%tolerance level
+dictSize  = 512;			%number of atoms in the dictionary
+percActiveAtoms = 5;		%percentage of active atoms
+
+% Test signal parameters
+signal    = audio('music03_16kHz.wav'); %audio signal
+blockSize = 256;						%size of audio frames
+overlap   = 0.5;						%overlap between consecutive frames
+
+% Dependent parameters
+nActiveAtoms = fix(blockSize/100*percActiveAtoms); %number of active atoms
+
+% Initial dictionaries
+gaborParam = struct('N',blockSize,'redundancyFactor',2,'wd',@rectwin);
+gaborDict = Gabor_Dictionary(gaborParam);
+initDicts = {[],gaborDict};
+
+%% Generate audio approximation problem
+signal			 = buffer(signal,blockSize,blockSize*overlap,@rectwin);	%buffer frames of audio into columns of the matrix S
+SMALL.Problem.b  = signal.S;
+SMALL.Problem.b1 = SMALL.Problem.b; % copy signals from training set b to test set b1 (needed for later functions)
+
+% omp2 sparse representation solver
+ompParam = struct('X',SMALL.Problem.b,'epsilon',epsilon,'maxatoms',nActiveAtoms); %parameters
+solver	 = SMALL_init_solver('ompbox','omp2',ompParam,false); %solver structure
+
+
+%% Test
+nDecorrAlgs = length(dicDecorr);		%number of decorrelation algorithms
+nCorrLevels = length(minCoherence);		%number of coherence levels
+nInitDicts  = length(initDicts);		%number of initial dictionaries
+
+SMALL.DL(nTrials,nInitDicts,nCorrLevels,nDecorrAlgs) = SMALL_init_DL(toolbox); %create dictionary learning structures
+for iTrial=1:nTrials
+	for iInitDicts=1:nInitDicts
+		for iCorrLevels=1:nCorrLevels
+			for iDecorrAlgs=1:nDecorrAlgs
+				SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).toolbox = toolbox;
+				SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).name = dicUpdate;
+				SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).profile = true;
+				SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).param = ...
+					struct( 'data',SMALL.Problem.b,...
+					'Tdata',nActiveAtoms,...
+					'dictsize',dictSize,...
+					'iternum',iterNum,...
+					'memusage','high',...
+					'solver',solver,...
+					'decFcn',dicDecorr{iDecorrAlgs},...
+					'coherence',minCoherence(iCorrLevels),...
+					'initdict',initDicts(iInitDicts));
+				
+				SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs) = ...
+					SMALL_learn(SMALL.Problem,SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs));
+				save('SMALL_DL','SMALL');
+			end
+		end
+	end
+end
+
+%% Evaluate coherence and snr of representation for the various methods
+sr = zeros(size(SMALL.DL));				%signal to noise ratio
+mu = zeros(nTrials,nInitDicts,nCorrLevels,nDecorrAlgs,blockSize);	%cumulative coherence
+dic(size(SMALL.DL)) = dictionary;		%initialise dictionary objects
+for iTrial=1:nTrials
+	for iInitDicts=1:nInitDicts
+		for iCorrLevels=1:nCorrLevels
+			for iDecorrAlgs=1:nDecorrAlgs
+				%Sparse representation
+				SMALL.Problem.A = SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).D;
+				tempSolver = SMALL_solve(SMALL.Problem,solver);
+				%calculate snr
+				sr(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs) = ...
+					snr(SMALL.Problem.b,SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).D*tempSolver.solution);
+				%calculate mu
+				dic(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs) = ...
+					dictionary(SMALL.DL(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).D);
+				mu(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs,:) = ...
+					dic(iTrial,iInitDicts,iCorrLevels,iDecorrAlgs).cumcoherence;
+			end
+		end
+	end
+end
+
+%% Plot results
+minMu = sqrt((dictSize-blockSize)/(blockSize*(dictSize-1)));	%lower bound on coherence
+initDictsNames = {'Data','Gabor'};
+dicDecorrNames = {'IPR','INK-SVD','IP'};
+lineStyles     = {'k.-','r*-','b+-'};
+for iInitDict=1:nInitDicts
+	figure, hold on, grid on
+	title([initDictsNames{iInitDict} ' Initialisation']);
+	plot([1 1],[0 25],'k-');
+	for iDecorrAlgs=1:nDecorrAlgs
+		coherenceLevels = squeeze(mean(mu(:,iInitDict,:,iDecorrAlgs,1),1));
+		meanSNRs		= squeeze(mean(sr(:,iInitDict,:,iDecorrAlgs),1));
+		stdSNRs			= squeeze(std(sr(:,iInitDict,:,iDecorrAlgs),0,1));
+		errorbar(coherenceLevels,meanSNRs,stdSNRs,lineStyles{iDecorrAlgs});
+	end
+	plot([minMu minMu],[0 25],'k--')
+	
+	set(gca,'YLim',[0 25],'XLim',[0 1.4]);
+	legend([{'\mu_{max}'},dicDecorrNames,{'\mu_{min}'}]);
+	xlabel('\mu');
+	ylabel('SNR (dB)');
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/examples/SMALL_test_mocod.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,145 @@
+clc, clear, close all
+
+%% Parameters
+nTrials   = 10;										%number of trials of the experiment
+
+% Dictionary learning parameters
+toolbox   = 'TwoStepDL';							%dictionary learning toolbox
+dicUpdate = 'mocod';								%dictionary learning updates
+zeta	  = logspace(-2,2,10); 
+eta  	  = logspace(-2,2,10); 
+
+iterNum   = 20;				%number of iterations
+epsilon   = 1e-6;			%tolerance level
+dictSize  = 512;			%number of atoms in the dictionary
+percActiveAtoms = 5;		%percentage of active atoms
+
+% Test signal parameters
+signal    = audio('music03_16kHz.wav'); %audio signal
+blockSize = 256;						%size of audio frames
+overlap   = 0.5;						%overlap between consecutive frames
+
+% Dependent parameters
+nActiveAtoms = fix(blockSize/100*percActiveAtoms); %number of active atoms
+
+% Initial dictionaries
+gaborParam = struct('N',blockSize,'redundancyFactor',2,'wd',@rectwin);
+gaborDict = Gabor_Dictionary(gaborParam);
+initDicts = {[],gaborDict};
+
+%% Generate audio approximation problem
+signal			 = buffer(signal,blockSize,blockSize*overlap,@rectwin);	%buffer frames of audio into columns of the matrix S
+SMALL.Problem.b  = signal.S;
+SMALL.Problem.b1 = SMALL.Problem.b; % copy signals from training set b to test set b1 (needed for later functions)
+
+% omp2 sparse representation solver
+ompParam = struct('X',SMALL.Problem.b,'epsilon',epsilon,'maxatoms',nActiveAtoms); %parameters
+solver	 = SMALL_init_solver('ompbox','omp2',ompParam,false); %solver structure
+
+%% Test
+nInitDicts  = length(initDicts);		%number of initial dictionaries
+nZetas = length(zeta);
+nEtas  = length(eta);
+
+SMALL.DL(nTrials,nInitDicts,nZetas,nEtas) = SMALL_init_DL(toolbox); %create dictionary learning structures
+for iTrial=1:nTrials
+	for iInitDicts=1:nInitDicts
+		for iZetas=1:nZetas
+			for iEtas=1:nEtas
+				SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).toolbox = toolbox;
+				SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).name = dicUpdate;
+				SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).profile = true;
+				SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).param = ...
+					struct('data',SMALL.Problem.b,...
+					'Tdata',nActiveAtoms,...
+					'dictsize',dictSize,...
+					'iternum',iterNum,...
+					'memusage','high',...
+					'solver',solver,...
+					'initdict',initDicts(iInitDicts),...
+					'zeta',zeta(iZetas),...
+					'eta',eta(iEtas));
+				SMALL.DL(iTrial,iInitDicts,iZetas,iEtas) = ...
+					SMALL_learn(SMALL.Problem,SMALL.DL(iTrial,iInitDicts,iZetas,iEtas));
+			end
+		end
+	end
+end
+
+%% Evaluate coherence and snr of representation for the various methods
+sr = zeros(size(SMALL.DL));				%signal to noise ratio
+mu = zeros(nTrials,nInitDicts,nZetas,nEtas);	%coherence
+dic(size(SMALL.DL)) = dictionary;		%initialise dictionary objects
+for iTrial=1:nTrials
+	for iInitDicts=1:nInitDicts
+		for iZetas=1:nZetas
+			for iEtas=1:nEtas
+				%Sparse representation
+				SMALL.Problem.A = SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).D;
+				tempSolver = SMALL_solve(SMALL.Problem,solver);
+				%calculate snr
+				sr(iTrial,iInitDicts,iZetas,iEtas) = ...
+					snr(SMALL.Problem.b,SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).D*tempSolver.solution);
+				%calculate mu
+				dic(iTrial,iInitDicts,iZetas,iEtas) = ...
+					dictionary(SMALL.DL(iTrial,iInitDicts,iZetas,iEtas).D);
+				mu(iTrial,iInitDicts,iZetas,iEtas) = ...
+					dic(iTrial,iInitDicts,iZetas,iEtas).coherence;
+			end
+		end
+	end
+end
+
+save('MOCOD.mat')
+
+%% Plot results
+minMu = sqrt((dictSize-blockSize)/(blockSize*(dictSize-1)));	%lower bound on coherence
+initDictsNames = {'Data','Gabor'};
+lineStyles     = {'k.-','r*-','b+-'};
+for iInitDict=1:nInitDicts
+	figure, hold on, grid on
+	title([initDictsNames{iInitDict} ' Initialisation']);
+	coherenceLevels = squeeze(mean(mu(:,iInitDict,:,:),1));
+	meanSNRs		= squeeze(mean(sr(:,iInitDict,:,:),1));
+	%stdSNRs				= squeeze(std(sr(:,iInitDict,iZetas,iEtas),0,1));
+	subplot(2,2,1)
+	surf(eta,zeta,coherenceLevels);
+	set(gca,'Xscale','log','Yscale','log','ZLim',[0 1.4]);
+	view(gca,130,20)
+	xlabel('\eta');
+	ylabel('\zeta');
+	zlabel('\mu');
+	title('Coherence')
+	
+	subplot(2,2,2)
+	surf(eta,zeta,meanSNRs);
+	set(gca,'Xscale','log','Yscale','log','ZLim',[0 25]);
+	view(gca,130,20)
+	xlabel('\eta');
+	ylabel('\zeta');
+	zlabel('SNR (dB)');
+	title('Reconstruction Error')
+	
+	subplot(2,2,[3 4])
+	mus = mu(:,iInitDict,:,:);
+	mus = mus(:);
+	SNRs = sr(:,iInitDict,:,:);
+	SNRs = SNRs(:);
+	[un idx] = sort(mus);
+	plot([1 1],[0 25],'k')
+	hold on, grid on
+	scatter(mus(idx),SNRs(idx),'k+');
+	plot([minMu minMu],[0 25],'k--')
+	set(gca,'YLim',[0 25],'XLim',[0 1.4]);
+	xlabel('\mu');
+	ylabel('SNR (dB)');
+	legend([{'\mu_{max}'},'MOCOD',{'\mu_{min}'}]);
+	title('Coherence-Reconstruction Error Tradeoff')
+	
+% 	plot([minMu minMu],[0 25],'k--')
+% 	
+% 	set(gca,'YLim',[0 25],'XLim',[0 1.4]);
+% 	legend([{'\mu_{max}'},dicDecorrNames,{'\mu_{min}'}]);
+% 	xlabel('\mu');
+% 	ylabel('SNR (dB)');
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/SMALL_ImgDeNoiseResult.m.orig	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,50 @@
+function SMALL_ImgDeNoiseResult(SMALL)   
+%%  Represents the results of Dictionary Learning for Image denoising
+%
+%   Function gets as input SMALL structure and plots  Image Denoise
+%   results: Original Image, Noisy Image and for learned dictionaries and 
+%   denoised images 
+%
+
+%   Centre for Digital Music, Queen Mary, University of London.
+%   This file copyright 2010 Ivan Damnjanovic.
+%
+%   This program is free software; you can redistribute it and/or
+%   modify it under the terms of the GNU General Public License as
+%   published by the Free Software Foundation; either version 2 of the
+%   License, or (at your option) any later version.  See the file
+%   COPYING included with this distribution for more information.
+%%
+
+
+figure('Name', sprintf('Image %s (training set size- %d, sigma - %d)',SMALL.Problem.name, SMALL.Problem.n, SMALL.Problem.sigma));
+
+m=size(SMALL.solver,2)+1;
+maxval=SMALL.Problem.maxval;
+im=SMALL.Problem.Original;
+imnoise=SMALL.Problem.Noisy;
+
+subplot(2, m, 1); imagesc(im/maxval);colormap(gray);axis off; axis image;        % Set aspect ratio to obtain square pixels
+title('Original image');
+
+subplot(2,m,m+1); imagesc(imnoise/maxval);axis off; axis image; 
+title(sprintf('Noisy image, PSNR = %.2fdB', SMALL.Problem.noisy_psnr ));
+
+for i=2:m
+    
+    subplot(2, m, i); imagesc(SMALL.solver(i-1).reconstructed.Image/maxval);axis off; axis image; 
+    title(sprintf('%s Denoised image, PSNR: %.2f dB in %.2f s',...
+        SMALL.DL(i-1).name, SMALL.solver(i-1).reconstructed.psnr, SMALL.solver(i-1).time ),'Interpreter','none');
+    if strcmpi(SMALL.DL(i-1).name,'ksvds')
+        D = kron(SMALL.Problem.basedict{2},SMALL.Problem.basedict{1})*SMALL.DL(i-1).D;
+    else
+        D = SMALL.DL(i-1).D;
+    end
+    dictimg = SMALL_showdict(D,SMALL.Problem.blocksize,...
+        round(sqrt(size(D,2))),round(sqrt(size(D,2))),'lines','highcontrast');
+    
+    subplot(2,m,m+i);imagesc(dictimg);axis off; axis image; 
+    title(sprintf('%s dictionary in %.2f s',...
+        SMALL.DL(i-1).name, SMALL.DL(i-1).time),'Interpreter','none');
+    
+end
--- a/util/SMALL_learn.m	Wed Sep 07 14:17:55 2011 +0100
+++ b/util/SMALL_learn.m	Thu Nov 17 11:22:17 2011 +0000
@@ -82,6 +82,10 @@
         DL.D(:,i)=DL.D(:,i)/norm(DL.D(:,i));
     end
     D = DL.D; 
+
+elseif strcmpi(DL.toolbox,'dl_ramirez')
+	DL = dl_ramirez(Problem,DL);
+	D = normcol(DL.D);
     
 %   To introduce new dictionary learning technique put the files in
 %   your Matlab path. Next, unique name <TolboxID> for your toolbox needs 
@@ -119,4 +123,4 @@
   DL.D = full(D);
   
 end
-  
\ No newline at end of file
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@audio/audio.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,63 @@
+classdef audio
+	%% Audio object
+	properties
+		s  %vector containing the audio signal
+		S  %matrix containing frames of audio for subsequent processing
+		fs %sampling frequency
+		nBits %number of bits per sample
+		name %string containing the name of the audio file
+		format %string containing the format of the audio file
+		bufferOperator %struct containing the parameters of the buffer operator (used by unbuffer to invert it)
+	end
+	
+	methods
+		%% Constructor
+		function obj = audio(varargin)
+			% if no arguments are specified, prompt for the choice of an
+			% audio file
+			if ~nargin
+				[fileName,pathname] = uigetfile({'*.wav; *.aiff;'},'Select an audio file');
+				varargin{1} = strcat(pathname,filesep,fileName);
+			end
+			if ischar(varargin{1})
+				[~, obj.name obj.format] = fileparts(varargin{1});
+				switch obj.format
+					case '.wav'
+						[obj.s obj.fs obj.nBits] = wavread(varargin{1});
+					otherwise
+						error('Unsupported audio format')
+				end
+			else
+				obj.s = varargin{1};
+				if nargin>1, obj.fs = varargin{2}; else obj.fs = []; end
+				if nargin>2, obj.nBits = varargin{3}; else obj.nBits = []; end
+				if nargin>3, obj.name = varargin{4}; else obj.name = []; end
+				if nargin>4, obj.format = varargin{5}; else obj.format = []; end
+			end
+			obj.S = [];
+			obj.bufferOperator = [];
+		end
+		
+		%% Playback functions
+		function player = play(obj, player)
+			if ~exist('player','var') || isempty(player)
+				player = audioplayer(obj.s,obj.fs);
+			end
+			play(player);
+		end
+		
+		function player = stop(obj, player)
+			if ~exist('player','var') || isempty(player)
+				player = audioplayer(obj.s,obj.fs);
+			end
+			stop(player)
+		end
+		
+		function player = pause(obj, player)
+			if ~exist('player','var') || isempty(player)
+				player = audioplayer(obj.s,obj.fs);
+			end
+			pause(player)
+		end
+	end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@audio/buffer.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,27 @@
+function obj = buffer(obj,wLength,overlap,window,method)
+
+%% Check inputs & Defaults
+error(nargchk(2, 5, nargin, 'struct'));
+
+if ~exist('overlap','var') || isempty(overlap), overlap = 0; end
+if ~exist('method','var') || isempty(method), method = 'standard'; end
+
+%% Buffer audio
+switch lower(method)
+	case 'standard'
+		if ~exist('window','var') || isempty(window), window = @rectwin; end
+		validWindows = {'hanning','hamming','triang','rectwin'};
+		if ~sum(strcmpi(validWindows,func2str(window)));
+			error('The window chosen is not valid because it cannot be inverted!');
+		end
+		obj.S = diag(window(wLength))*buffer(obj.s,wLength,overlap,'nodelay');
+	case 'lot'
+		if ~exist('window','var') || isempty(window), window = 'sin2'; end
+		s_lot = lot(obj.s,wLength,'id',overlap,window);
+		obj.S = buffer(s_lot,wLength);
+	otherwise
+		error('Please specify a valid buffer method');
+end
+
+obj.bufferOperator = struct('wLength',wLength,'overlap',...
+	overlap,'window',window,'method',method);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@audio/plot.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,95 @@
+function plot(obj)
+
+figure,
+playbackPanelH = uipanel(gcf,'Units','Normalized','Position',[.3 0 .4 .1]);
+
+buttWidth = 1/6;
+centers = linspace(0,1,7)-buttWidth/2;
+rewButtonH = uicontrol(playbackPanelH,'Style','pushbutton','String','<<','Units',...
+	'Normalized','Position',[centers(2) 0.2 buttWidth 0.6]);
+ffButtonH = uicontrol(playbackPanelH,'Style','togglebutton','String','>>','Units',...
+	'Normalized','Position',[centers(3) 0.2 buttWidth 0.6]);
+stopButtonH = uicontrol(playbackPanelH,'Style','pushbutton','String','stop','Units',...
+	'Normalized','Position',[centers(4) 0.2 buttWidth 0.6]);
+playButtonH = uicontrol(playbackPanelH,'Style','togglebutton','String','play','Units',...
+	'Normalized','Position',[centers(5) 0.2 buttWidth 0.6]);
+pauseButtonH = uicontrol(playbackPanelH,'Style','togglebutton','String','||','Units',...
+	'Normalized','Position',[centers(6) 0.2 buttWidth 0.6]);
+
+waveformPanelH = uipanel(gcf,'Units','Normalized','Position',[.02 .12 .96 .86]);
+waveformAxesH = axes('Parent',waveformPanelH);
+
+%% Plot the time domain signal
+s = obj.s;
+fs = obj.fs;
+plot((1:length(s))/fs,s);
+title('Audio signal')
+xlabel('time (s)');
+axis tight
+
+player = audioplayer(s,fs);
+set(player,'TimerPeriod',0.1);
+set(player,'StartFcn',@plotTransportBar);
+set(player,'TimerFcn',@updateTransportBar);
+set(player,'StopFcn',@deleteTransportBar);
+
+%% Add playback controls
+set(playButtonH,'Callback',@play_callback);
+set(stopButtonH,'Callback',@stop_callback);
+set(pauseButtonH,'Callback',@pause_callback);
+set(rewButtonH,'Callback',@rew_callback);
+set(ffButtonH,'Callback',@ff_callback);
+
+	function play_callback(~,~)
+		set(player,'SampleRate',fs);
+		play(player,player.CurrentSample);
+		set(pauseButtonH,'Value',0);
+		set(ffButtonH,'Value',0);
+	end
+
+	function pause_callback(~,~)
+		pause(player);
+		set(playButtonH,'Value',0);
+		set(ffButtonH,'Value',0);
+	end
+
+	function stop_callback(~,~)
+		stop(player);
+		set(playButtonH,'Value',0);
+		set(pauseButtonH,'Value',0);
+		set(ffButtonH,'Value',0);
+	end
+
+	function ff_callback(~,~)
+		set(player,'SampleRate',1.5*fs);
+		set(pauseButtonH,'Value',0);
+		set(playButtonH,'Value',0);
+	end
+
+	function rew_callback(~,~)
+		stop(player);
+		play(player);
+		set(pauseButtonH,'Value',0);
+		set(playButtonH,'Value',1);
+	end
+
+%% Transport Bar functions
+	function plotTransportBar(~,~)
+		global tbH
+		xLim = get(gca,'Xlim');
+		yLim = get(gca,'YLim');
+		tbH = line([xLim(1) xLim(1)],yLim,'Color','k');
+	end
+
+	function updateTransportBar(hObject,~)
+		global tbH
+		currentSample = hObject.CurrentSample;
+		pos = currentSample/fs;
+		set(tbH,'XData',[pos pos]);
+	end
+
+	function deleteTransportBar(~,~)
+		global tbH
+		delete(tbH);
+	end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@audio/unbuffer.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,27 @@
+function s = unbuffer(obj)
+
+%% Check inputs and Defaults
+if ~isprop(obj,'bufferOperator') || isempty(obj.bufferOperator)
+	error('You must buffer a signal before unbuffering it.');
+end
+
+switch lower(obj.bufferOperator.method)
+	%Unbuffer using overlap-add method
+	case 'standard'
+		w = obj.bufferOperator.window(obj.bufferOperator.wLength);
+		S = diag(1./w)*obj.S;
+		%Non overlapping case
+		if obj.bufferOperator.overlap == 0
+			s = S(:);
+		%Overlapping case
+		else
+			Stemp = S(1:obj.bufferOperator.wLength-obj.bufferOperator.overlap,1:end);
+			s = [Stemp(:); S(obj.bufferOperator.wLength-obj.bufferOperator.overlap+1:end,end)];
+		end
+	%Unbuffer using inverse lot with identity local transform
+	case 'lot'
+		s = ilot(obj.S(:),obj.bufferOperator.wLength,'id',...
+			obj.bufferOperator.overlap,obj.bufferOperator.window);
+	otherwise
+		error('Please specify a valid buffer method');
+end
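
A round-trip sketch of the audio class with the standard buffering method (rectangular window, no overlap); it assumes the Signal Processing Toolbox buffer function used internally is available and picks a signal length that is a multiple of the frame length, so no zero-padding is introduced.

s = sin(2*pi*440*(0:1023)'/8000);      %1024-sample test tone
a = audio(s,8000);                     %construct the object from raw samples
a = buffer(a,256);                     %frame into 256-sample non-overlapping columns of a.S
sHat = unbuffer(a);                    %invert the framing
max(abs(sHat - s))                     %zero up to numerical precision
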
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@dictionary/cumcoherence.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,19 @@
+function mu = cumcoherence(obj)
+obj = normalize(obj);
+[M N] = size(obj.phi);
+mu = zeros(M,1);
+for m=1:M
+    c = zeros(N);
+    for i=1:N
+        c(:,i) = abs(obj.phi'*obj.phi(:,i));
+        c(i,i) = 0;
+    end
+    c = sort(c,'descend');
+    c = c(1:m,:);
+    if m==1
+        mu(m) = max(c);
+    else
+        mu(m) = max(sum(c));
+    end
+end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@dictionary/dictionary.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,114 @@
+classdef dictionary
+    %% Dictionary for sparse representation
+    properties
+        phi         %Matrix containing the dictionary
+        len         %Length of basis functions
+        nAtoms      %Number of basis functions
+        name        %String containing the matrix ensemble from which the dictionary is drawn
+    end
+    properties (Dependent = true)
+        redundancy      %Redundancy of the dictionary: nAtoms/len
+        coherence       %Maximum absolute inner product between distinct atoms
+        isNormalised    %True if the atoms have unit norm
+        rank            %rank of the dictionary
+    end
+    
+    methods
+        %% Constructor
+        function obj = dictionary(phi,len,nAtoms)
+            % obj = dictionary(phi,len,nAtoms)
+            % INPUTS:
+            % - phi: either a string specifying a matrix ensemble or a
+            % matrix defining an explicit dictionary
+            % - len: length of the atoms (only for implicit dictionaries)
+            % - nAtoms: number of atoms (only for implicit dictionaries)
+            if nargin
+                if ~ischar(phi)
+                    [obj.len obj.nAtoms] = size(phi);
+                    obj.phi              = phi;
+                    obj.name             = 'explicit';
+                else
+                    switch lower(phi)
+                        case 'dct'
+                            obj.phi = dctmatrix(len,nAtoms);
+                        case 'grassmannian'
+                            obj.phi = grassmannian(len,nAtoms);
+                        otherwise
+                            obj.phi = MatrixEnsemble(len,nAtoms,phi);
+                    end
+                    obj.len    = len;
+                    obj.nAtoms = nAtoms;
+                    obj.name   = lower(phi);
+                end
+            end
+        end
+        
+        %% Dependent properties
+        function redundancy = get.redundancy(obj)
+            redundancy = obj.nAtoms/obj.len;
+        end
+        function coherence = get.coherence(obj)
+            obj.phi = normcol(obj.phi);
+            G = obj.phi'*obj.phi;
+            G = G - eye(size(G));
+            coherence = max(abs(G(:)));
+        end
+        function isNormalised = get.isNormalised(obj)
+            isNormalised = norm(sum(conj(obj.phi).*obj.phi) - ...
+                ones(1,obj.nAtoms))<1e-9;
+        end
+        function r = get.rank(obj)
+            r = rank(obj.phi);
+        end
+        %% Operations
+        function obj = normalize(obj)
+            obj.phi = normcol(obj.phi);
+        end
+        
+        %% Visualization
+        function image(obj)
+            %Image of the dictionary
+            if isreal(obj.phi)
+                imagesc(obj.phi);
+                title('Dictionary');
+                xlabel('Atom number');
+            else
+                subplot(2,1,1)
+                imagesc(real(obj.phi));
+                title('Real');
+                xlabel('Atom number');
+                subplot(2,1,2)
+                imagesc(imag(obj.phi));
+                title('Imaginary');
+                xlabel('Atom number');
+            end
+        end
+        function imagegram(obj)
+            G = obj.phi'*obj.phi;
+            imagesc(G);
+            title('Gram Matrix')
+        end
+        function plot(obj,n)
+            %Plot of the n-th basis
+            if isreal(obj.phi)
+                plot(obj.phi(:,n));
+                title(['Atom number ' num2str(n) '/' num2str(size(obj.phi,2))]);
+            else
+                subplot(2,1,1)
+                plot(real(obj.phi(:,n)));
+                title(['Atom number ' num2str(n) '/' num2str(size(obj.phi,2)) ' - Real']);
+                subplot(2,1,2)
+                plot(imag(obj.phi(:,n)));
+                title(['Atom number ' num2str(n) '/' num2str(size(obj.phi,2)) ' - Imaginary']);
+            end
+        end
+        
+        function movie(obj)
+            %Movie of the basis
+            for i=1:size(obj.phi,2)
+                obj.plot(i);
+                pause(1/25);
+            end
+        end
+    end
+end
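
A short sketch of the dictionary class together with the cumcoherence method added above; the first entry of the cumulative coherence should coincide with the coherence property.

D = dictionary(randn(16,32));    %explicit random dictionary with 32 atoms in R^16
D = normalize(D);                %enforce unit-norm atoms
mu_k = cumcoherence(D);          %cumulative coherence mu(1),...,mu(16)
[mu_k(1) D.coherence]            %the two values should agree
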
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@dictionary/mtimes.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,11 @@
+function C = mtimes(A,B)
+isAdic = strcmp(class(A),'dictionary');
+isBdic = strcmp(class(B),'dictionary');
+if isAdic && ~isBdic
+    C = A.phi*B;
+elseif isBdic && ~isAdic
+    C = A*B.phi;
+elseif isAdic && isBdic
+    C = A.phi*B.phi;
+end
+end
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@dictionary/plotcumcoherence.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,42 @@
+function plotcumcoherence(obj,mu)
+if ~exist('mu','var') || isempty(mu), mu = cumcoherence(obj); end
+[d N] = size(obj.phi);
+
+% Plot cumulative coherence with lower and upper bounds
+mumin = (1:d)*sqrt((N-d)/(d*(N-1)));
+mumax = (1:d);
+figure,
+subplot(1,6,1:2)
+hold on, grid on
+plot(1,mumax(1),'k-s');
+plot(1,mu(1),'ko');
+plot(1,mumin(1),'k-d')
+set(gca,'XScale','log','YScale','log');
+axis tight
+ylabel('\mu');
+ylim([mumin(1) 10])
+
+subplot(1,6,3:6)
+hold on, grid on
+plot(2:d,mumax(2:end),'k-s');
+plot(2:d,mu(2:end),'k-o');
+plot(2:d,mumin(2:end),'k-d');
+set(gca,'XScale','log','YScale','log');
+axis tight
+xlabel('k');
+ylabel('\mu(k)');
+ylim([mumin(1) 10])
+
+% Plot recovery bounds
+plot(2:d,1-mu(1:d-1),'r-o')
+plot(2:d,1-mumin(1:d-1)','r-d')
+plot([2 d],[1/3 1/3],'b');
+legend('\mu_{max}(k)','\mu(k)','\mu_{min}(k)','Exact-Sparse \mu','Exact-Sparse \mu_{min}','Sparse');
+
+
+% v = conv(mu,[1 1]);
+% ind = find(v<1, 1, 'last');
+% 
+% line([ind ind], [min(mu) max(mu)],'Color','red');
+% title(['Minimum allowed sparsity (Tanner):' num2str(ind/obj.len)]);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/@dictionary/spark.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,35 @@
+function varargout = spark(obj)
+% Calculates the minimum number of linearly dependent atoms in the matrix A
+% WARNING: this function computes a combinatorial function, use only if the
+% size of the problem is small (i.e. <20)
+if nargout <= 1
+    A = obj.phi;
+    k = size(A,2);
+    if k>20
+        warning('This function computes a combinatorial function, use only if the size of the problem is small (i.e. <20).');
+        choice = input('The calculation of spark will take a long time... do you wish to continue anyway (y/n)','s');
+        if strcmpi( choice,'n')
+            return
+        end
+    end
+    sigma = 2;
+    while true
+        P = nchoosek(1:k,sigma);
+        for i=1:size(P,1)
+            r = rank(A(:,P(i,:)));
+            if r<sigma
+                varargout{1} = sigma;
+                return
+            end
+        end
+        sigma = sigma + 1;
+        if sigma>k	%every subset of atoms is linearly independent
+            varargout{1} = inf;
+            return
+        end
+    end
+else
+    %% TODO: calculate lower and upper bounds on the spark
+    varargout{1} = 2;
+    varargout{2} = inf;
+end
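+
+% Worked example (illustrative): for the 2-by-3 matrix A = [1 0 1; 0 1 1]
+% every pair of columns is linearly independent, but the three columns
+% satisfy a1 + a2 - a3 = 0, so spark(A) = 3. Wrapped in a dictionary
+% object (hypothetical construction):
+%
+%   dic = dictionary([1 0 1; 0 1 1]);
+%   s = spark(dic);		%returns 3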
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/dictionaryMatrices/dctmatrix.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,24 @@
+function D = dctmatrix(N,K,type)
+
+error(nargchk(1,3,nargin,'struct'));
+if ~exist('type','var') || isempty(type), type='II'; end
+if ~exist('K','var') || isempty(K), K=N; end
+
+[c r] = meshgrid(0:K-1,0:N-1);
+switch type
+    case 'I'
+        D = cos(pi*c.*r/(K-1));
+        D(1,:) = D(1,:)/sqrt(2);
+        D(end,:) = D(end,:)/sqrt(2);
+    case 'II'
+        D = cos(pi*(2*c+1).*r/(2*K));
+        D(1,:) = D(1,:)/sqrt(2);
+    case 'III'
+        D = cos(pi*(2*r+1).*c/(2*K));
+        D(:,1) = D(:,1)/sqrt(2);
+    case 'IV'
+        D = cos(pi*(2*r+1+2*c+4*c.*r)/(4*K));
+    otherwise
+        error('unsupported dct type');
+end
+D = normcol(D);
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/dictionaryMatrices/grassmannian.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,47 @@
+function [A G res muMin] = grassmannian(n,m,nIter,dd1,dd2,initA,verb)
+% grassmannian attempts to create an n by m matrix with minimal mutual
+% coherence using an iterative projection method.
+%
+% [A G res muMin] = grassmannian(n,m,nIter,dd1,dd2,initA,verb)
+%
+% REFERENCE
+% M. Elad, Sparse and Redundant Representations, Springer 2010.
+
+%% Parameters and Defaults
+error(nargchk(2,7,nargin));
+
+if ~exist('verb','var')  || isempty(verb),  verb = false; end %verbose output
+if ~exist('initA','var') || isempty(initA), initA = randn(n,m); end %initial matrix
+if ~exist('dd2','var')   || isempty(dd2),   dd2 = 0.9; end %shrinking factor
+if ~exist('dd1','var')   || isempty(dd1),   dd1 = 0.9; end %fraction of coherences to be shrunk
+if ~exist('nIter','var') || isempty(nIter), nIter = 10; end %number of iterations
+
+%% Main algo
+A = normc(initA); %normalise columns
+[Uinit Sigma] = svd(A);
+G = A'*A; %gram matrix
+
+muMin = sqrt((m-n)/(n*(m-1)));              %Lower bound on mutual coherence (equiangular tight frame)
+res = zeros(nIter,1);
+if verb
+	fprintf(1,'Iter		mu_min		mu \n');
+end
+
+% optimise gram matrix
+for iIter = 1:nIter
+	gg  = sort(abs(G(:))); %sort inner products from less to most correlated
+	pos = find(abs(G(:))>=gg(round(dd1*(m^2-m))) & abs(G(:)-1)>1e-6); %find large elements of gram matrix
+	G(pos) = G(pos)*dd2;	%shrink large elements of gram matrix
+	[U S V] = svd(G);	%compute new SVD of gram matrix
+	S(n+1:end,1+n:end) = 0; %set trailing singular values to zero (this ensures rank(G)<=n)
+	G = U*S*V';			%update gram matrix
+	G = diag(1./abs(sqrt(diag(G))))*G*diag(1./abs(sqrt(diag(G)))); %normalise gram matrix diagonal
+	if verb
+		Geye = G - eye(size(G));
+		fprintf(1,'%6i    %12.8f  %12.8f  \n',iIter,muMin,max(abs(Geye(:))));
+	end
+end
+
+[V_gram Sigma_gram] = svd(G);				%calculate svd decomposition of gramian
+Sigma_new = sqrt(Sigma_gram(1:n,:)).*sign(Sigma); %calculate singular values of dictionary
+A = Uinit*Sigma_new*V_gram';				%update dictionary
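+
+% Usage sketch (illustrative): build a 64-by-128 frame with low mutual
+% coherence and compare it with the equiangular tight frame bound. All
+% names below are local variables, not part of the toolbox.
+%
+%   [A, G, res, muMin] = grassmannian(64,128,50);
+%   offDiag = abs(A'*A) - eye(128);
+%   fprintf('coherence %f (lower bound %f)\n', max(offDiag(:)), muMin);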
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/dictionaryMatrices/iterativeprojections.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,97 @@
+function [dic mus srs] = iterativeprojections(dic,mu,Y,X,params)
+% iterativeprojections reduces the mutual coherence of the dictionary dic
+% to (at most) mu by alternating projections onto a structural constraint
+% set (off-diagonal Gram entries bounded by mu) and a spectral constraint
+% set, rotating the dictionary at each iteration to fit the signals Y=dic*X.
+%
+% REFERENCE
+%
+
+%% Parameters and Defaults
+if ~nargin, testiterativeprojections; return; end
+
+if ~exist('params','var') || isempty(params), params = struct; end
+if ~isfield(params,'nIter'), params.nIter = 10; end %number of iterations
+if ~isfield(params,'eps'),	 params.eps = 1e-9;	 end %tolerance level
+[n m] = size(dic);
+
+SNR = @(dic) snr(Y,dic*X);									 %SNR function
+MU  = @(dic) max(max(abs((dic'*dic)-diag(diag(dic'*dic))))); %coherence function
+
+%% Main algorithm
+dic = normc(dic);	%normalise columns
+alpha = m/n;		%ratio between number of atoms and ambient dimension
+
+mus = zeros(params.nIter,1);		%coherence at each iteration
+srs = zeros(params.nIter,1);		%signal to noise ratio at each iteration
+iIter = 1;
+while iIter<=params.nIter && MU(dic)>mu
+	fprintf(1,'Iteration number %u\n', iIter);
+	% calculate snr and coherence
+	mus(iIter) = MU(dic);
+	srs(iIter) = SNR(dic);
+	
+	% calculate gram matrix
+	G = dic'*dic;
+	
+	% project into the structural constraint set
+	H = zeros(size(G));				%initialise matrix
+	ind1 = find(abs(G(:))<=mu);		%find elements smaller than mu
+	ind2 = find(abs(G(:))>mu);		%find elements bigger than mu
+	H(ind1) = G(ind1);				%copy elements belonging to ind1
+	H(ind2) = mu*sign(G(ind2));		%threshold elements belonging to ind2
+	H(1:m+1:end) = 1;				%set diagonal to one
+	
+	% project into spectral constraint set
+	[~ , S, V] = svd(H);
+	%G = alpha*(V(:,1:n)*V(:,1:n)');
+	G = V(:,1:n)*S(1:n,1:n)*V(:,1:n)';
+	
+	% calculate dictionary
+	[~, S, V] = svd(G);
+	dic = sqrt(S(1:n,:))*V';
+	
+	% rotate dictionary
+	options = struct('nIter',100,'step',0.001);
+	[~, ~, W] = rotatematrix(Y,dic*X,'conjgradlie',options);
+	dic = W*dic;
+	
+	iIter = iIter+1;
+end
+if iIter>1 && iIter<=params.nIter
+	mus(iIter:end) = mus(iIter-1);	%pad with the last computed values
+	srs(iIter:end) = srs(iIter-1);
+end
+% Test function
+function testiterativeprojections
+clc
+%define parameters
+n = 256;							%ambient dimension
+m = 512;							%number of atoms
+N = 1024;							%number of signals
+mu_min = sqrt((m-n)/(n*(m-1)));		%minimum coherence
+
+%initialise data
+X = sprandn(m,N,1);					%matrix of coefficients
+phi = normc(randn(n,m));			%dictionary
+temp = randn(n);
+W = expm(0.5*(temp-temp'));			%rotation matrix
+Y = W*phi*X;						%observed signals
+
+%optimise dictionary
+[~, mus, srs] = iterativeprojections(phi,0.2,Y,X);
+
+%plot results
+nIter = length(mus);
+
+figure, 
+subplot(2,1,1)
+plot(1:nIter,srs,'kd-');
+xlabel('nIter');
+ylabel('snr (dB)');
+grid on
+
+subplot(2,1,2), hold on
+plot(1:nIter,mus,'ko-');
+plot([1 nIter],[mu_min mu_min],'k')
+grid on
+legend('\mu','\mu_{min}');
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/dictionaryMatrices/rotatematrix.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,113 @@
+function [Dhat cost W] = rotatematrix(D,Phi,method,param)
+%
+%
+%
+% REFERENCE
+% M.D. Plumbley, Geometrical Methods for Non-Negative ICA: Manifolds, Lie
+% Groups and Toral Subalgebras, Neurocomputing
+
+%% Parse inputs and set defaults
+if ~nargin, testrotatematrix; return, end
+
+if ~exist('param','var')  || isempty(param),  param  = struct; end
+if ~exist('method','var') || isempty(method), method = 'conjgradlie'; end
+if ~isfield(param,'nIter'), param.nIter = 100; end	%number of iterations
+if ~isfield(param,'eps'), param.eps = 1e-9; end		%tolerance level
+if ~isfield(param,'step'), param.step = 0.01; end
+
+J = @(W) 0.5*norm(D-W*Phi,'fro')^2;		%cost function (consistent with the gradient used below)
+
+% Initialise variables
+cost = zeros(param.nIter,1);			%cost at each iteration
+W	 = eye(size(Phi,1));				%rotation matrix
+grad = ones(size(W));					%gradient
+t	 = param.step;						%step size
+Gprev = 0;								%previous gradient
+Hprev = 0;								%previous Lie search direction
+iIter = 1;								%iteration counter
+
+%% Main algorithm
+while iIter<=param.nIter && norm(grad,'fro')>param.eps
+	cost(iIter) = J(W);				%calculate cost
+	grad = (W*Phi-D)*Phi';			%calculate gradient
+	switch method
+		case 'unconstrained'		% gradient descent
+			eta = param.step;
+			W = W - eta*grad;		% update W by steepest descent
+		case 'tangent'				% self correcting tangent
+			eta = param.step;
+			W = W - 0.5*eta*(grad - W*grad'*W);
+			[U , ~, V] = svd(W);
+			W = U*V';
+		case 'steepestlie'			%steepest descent in Lie algebra
+			eta = param.step;
+			B = 2*skew(grad*W');	% calculate gradient in Lie algebra
+			W = expm(-eta*B)*W;		% update W by steepest descent
+		case 'linesearchlie'		% line search in Lie algebra
+			B = 2*skew(grad*W');	% calculate gradient in Lie algebra
+			H = -B;					% calculate direction as negative gradient
+			t = searchline(J,H,W,t);% line search in one-parameter Lie subalgebra
+			W = expm(t*H)*W;		% update W by line search
+		case 'conjgradlie'			% conjugate gradient in Lie algebra
+			G = 2*skew(grad*W');	% calculate gradient in Lie algebra
+			H = -G + polakRibiere(G,Gprev)*Hprev; %calculate conjugate gradient direction
+			t = searchline(J,H,W,t);% line search in one-parameter Lie subalgebra
+			W = expm(t*H)*W;		% update W by line search
+			Hprev = H;				% save search direction
+			Gprev = G;				% save gradient
+	end
+	iIter = iIter+1;				% update iteration counter
+end
+Dhat = W*Phi;						%rotate matrix
+cost(iIter:end) = cost(iIter-1);	%pad cost vector with its last value
+end
+
+%% Support functions
+function gamma = polakRibiere(G,Gprev)
+%Polak-Ribiere rule for conjugate direction calculation
+gamma = G(:)'*(G(:)-Gprev(:))/(norm(Gprev(:))^2);
+if isnan(gamma) || isinf(gamma)
+	gamma = 0;
+end
+end
+
+function t = searchline(J,H,W,t)
+%Line search in one-parameter Lie subalgebra
+t = fminsearch(@(x) J(expm(x*H)*W),t);
+end
+
+function B = skew(A)
+%Skew-symmetric matrix
+B = 0.5*(A - A');
+end
+
+
+%% Test function
+function testrotatematrix
+clear, clc, close all
+n = 256;							%ambient dimension
+m = 512;							%number of atoms
+param.nIter = 300;				%number of iterations
+param.step  = 0.001;			%step size
+param.mu    = 0.01;			%regularization factor (for tangent method)
+methods = {'unconstrained','tangent','linesearchlie','conjgradlie'};
+
+Phi = randn(n,m);				%initial dictionary
+Qtrue = expm(skew(randn(n)));	%rotation matrix
+D = Qtrue*Phi;					%target dictionary
+
+cost = zeros(param.nIter,length(methods));
+for iIter=1:length(methods)
+	tic
+	[~, cost(:,iIter)] = rotatematrix(D,Phi,methods{iIter},param);
+	time = toc;
+	fprintf(1,'Method %s completed in %f seconds \n',methods{iIter},time);
+end
+
+figure, plot(cost)
+set(gca,'XScale','log','Yscale','log')
+legend(methods)
+grid on
+xlabel('number of iterations')
+ylabel('J(W)')
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/classes/dictionaryMatrices/shrinkgram.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,67 @@
+function [dic mus] = shrinkgram(dic,mu,dd1,dd2,params)
+% shrinkgram reduces the mutual coherence of the dictionary dic to (at
+% most) mu by iteratively shrinking the largest off-diagonal entries of
+% its Gram matrix and projecting back onto the set of rank-n Gram matrices.
+%
+% [dic mus] = shrinkgram(dic,mu,dd1,dd2,params)
+%
+% REFERENCE
+% M. Elad, Sparse and Redundant Representations, Springer 2010.
+
+%% Parameters and Defaults
+if ~nargin, testshrinkgram; return; end
+
+if ~exist('dd2','var')   || isempty(dd2),   dd2 = 0.9; end %shrinking factor
+if ~exist('dd1','var')   || isempty(dd1),   dd1 = 0.9; end %fraction of coherences to be shrunk
+if ~exist('params','var') || isempty(params), params = struct; end
+if ~isfield(params,'nIter'), params.nIter = 100; end
+
+%% Main algo
+dic = normc(dic);	%normalise columns
+G = dic'*dic;		%gram matrix
+[n m] = size(dic);
+
+MU  = @(G) max(max(abs(G-diag(diag(G))))); %coherence function
+
+mus   = ones(params.nIter,1);
+iIter = 1;
+% optimise gram matrix
+while iIter<=params.nIter && MU(G)>mu
+	mus(iIter) = MU(G);		%calculate coherence
+	gg  = sort(abs(G(:)));	%sort inner products from less to most correlated
+	pos = find(abs(G(:))>=gg(round(dd1*(m^2-m))) & abs(G(:)-1)>1e-6); %find large elements of gram matrix
+	G(pos) = G(pos)*dd2;	%shrink large elements of gram matrix
+	[U S V] = svd(G);		%compute new SVD of gram matrix
+	S(n+1:end,1+n:end) = 0; %set trailing singular values to zero (this ensures rank(G)<=n)
+	G = U*S*V';				%update gram matrix
+	G = diag(1./abs(sqrt(diag(G))))*G*diag(1./abs(sqrt(diag(G)))); %normalise gram matrix diagonal
+	iIter = iIter+1;
+end
+%if iIter<params.nIter
+%	mus(iIter:end) = mus(iIter-1);
+%end
+
+[V_gram Sigma_gram] = svd(G);				%calculate svd decomposition of gramian
+dic = sqrt(Sigma_gram(1:n,:))*V_gram';		%update dictionary
+
+function testshrinkgram
+clc
+%define parameters
+n = 256;							%ambient dimension
+m = 512;							%number of atoms
+N = 1024;							%number of signals
+mu_min = sqrt((m-n)/(n*(m-1)));		%minimum coherence
+
+%initialise data
+phi = normc(randn(n,m));			%dictionary
+
+%optimise dictionary
+[~, mus] = shrinkgram(phi,0.2);
+
+%plot results
+nIter = length(mus);
+
+figure, hold on
+plot(1:nIter,mus,'ko-');
+plot([1 nIter],[mu_min mu_min],'k')
+grid on
+legend('\mu','\mu_{min}');
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util/snr.m	Thu Nov 17 11:22:17 2011 +0000
@@ -0,0 +1,6 @@
+function x = snr(s,r)
+% SNR calculates the signal-to-noise ratio between the signal s and its
+% representation r defined as:
+% SNR = 20*log10(||s||_2/||s-r||_2)
+err = s-r;
+x = mag2db(norm(s(:))/norm(err(:)));
\ No newline at end of file
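+
+% Worked example (illustrative): a representation equal to the signal
+% scaled by 0.9 leaves a residual of norm 0.1*||s||_2, so
+% SNR = 20*log10(1/0.1) = 20 dB.
+%
+%   s = randn(128,1);
+%   x = snr(s,0.9*s);	%returns 20 (up to numerical precision)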