changeset 10:edb5a287e0bb
Matlab SL0
author | nikcleju
---|---
date | Sat, 05 Nov 2011 21:15:02 +0000
parents | f31d5484a573
children | 66d8654fb73e
files | matlab/SL0/SL0.m, matlab/SL0/SL0_approx.m, scripts/ABSapprox.py, scripts/study_analysis_rec_approx.m, tests/sl0_test.py
diffstat | 5 files changed, 775 insertions(+), 0 deletions(-)
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/SL0/SL0.m	Sat Nov 05 21:15:02 2011 +0000
@@ -0,0 +1,143 @@
+function s=SL0(A, x, sigma_min, sigma_decrease_factor, mu_0, L, A_pinv, true_s)
+%
+% SL0(A, x, sigma_min, sigma_decrease_factor, mu_0, L, A_pinv, true_s)
+%
+% Returns the sparsest vector s which satisfies the underdetermined system of
+% linear equations A*s=x, using the Smoothed L0 (SL0) algorithm. Note that
+% the matrix A should be a 'wide' matrix (more columns than rows). The
+% number of rows of matrix A should be equal to the length of the
+% column vector x.
+%
+% The first 3 arguments must be provided by the user. The
+% other parameters have default values calculated within the function, or
+% may be provided by the user.
+%
+% Sequence of Sigma (sigma_min and sigma_decrease_factor):
+%   This is a decreasing geometric sequence of positive numbers:
+%     - The first element of the sequence of sigma is calculated
+%     automatically. The last element is given by 'sigma_min', and the
+%     change factor for decreasing sigma is given by 'sigma_decrease_factor'.
+%     - The default value of 'sigma_decrease_factor' is 0.5. A larger value
+%     gives better results for less sparse sources, but it uses more steps
+%     on sigma to reach sigma_min, and hence requires higher
+%     computational cost.
+%     - There is no default value for 'sigma_min'; it should be
+%     provided by the user (depending on his/her estimated source noise
+%     level, or his/her desired accuracy). By 'noise' we mean here the
+%     noise in the sources, that is, the energy of the inactive elements of
+%     's'. For example, by the noiseless case, we mean that the inactive
+%     elements of 's' are exactly equal to zero. As a rule of thumb, for the
+%     noisy case, sigma_min should be about 2 to 4 times the standard
+%     deviation of this noise. For the noiseless case, a smaller 'sigma_min'
+%     results in a better estimation of the sparsest solution, and hence its
+%     value is determined by the desired accuracy.
+%
+% mu_0:
+%     The value of mu_0 scales the sequence of mu. For each value of
+%     sigma, the value of mu is chosen via mu=mu_0*sigma^2. Note that this
+%     value affects convergence.
+%     The default value is mu_0=2 (see the paper).
+%
+% L:
+%     number of iterations of the internal (steepest ascent) loop. The
+%     default value is L=3.
+%
+% A_pinv:
+%     is the pseudo-inverse of matrix A defined by A_pinv=A'*inv(A*A').
+%     If it is not provided, it will be calculated within the function. If
+%     you use this function for solving x(t)=A s(t) for different values of
+%     't', it would be a good idea to calculate A_pinv outside the function
+%     to prevent its re-calculation for each 't'.
+%
+% true_s:
+%     is the true value of the sparse solution. This argument is for
+%     simulation purposes. If it is provided by the user, the function
+%     will calculate the SNR of the estimation for each value of sigma and
+%     provide a progress report.
+%
+% Authors: Massoud Babaie-Zadeh and Hossein Mohimani
+% Version: 2.0
+% Last modified: 4 April 2010.
+%
+%
+% Web-page:
+% ------------------
+%    http://ee.sharif.ir/~SLzero
+%
+% Code History:
+%--------------
+% Version 2.0: 4 April 2010
+%   A few small modifications that enable the code to work also
+%   for complex numbers (not only for real numbers).
+%
+% Version 1.3: 13 Sep 2008
+%   Just a few modifications in the comments
+%
+% Version 1.2: Adding some more comments in the help section
+%
+% Version 1.1: 4 August 2008
+%   - Using MATLAB's pseudo-inverse function to generalize to the case
+%     where the matrix A is not full-rank.
+%
+% Version 1.0 (first official version): 4 July 2008.
+%
+% First non-official version and algorithm development: Summer 2006
+
+if nargin < 4
+    sigma_decrease_factor = 0.5;
+    A_pinv = pinv(A);
+    mu_0 = 2;
+    L = 3;
+    ShowProgress = logical(0);
+elseif nargin == 4
+    A_pinv = pinv(A);
+    mu_0 = 2;
+    L = 3;
+    ShowProgress = logical(0);
+elseif nargin == 5
+    A_pinv = pinv(A);
+    L = 3;
+    ShowProgress = logical(0);
+elseif nargin == 6
+    A_pinv = pinv(A);
+    ShowProgress = logical(0);
+elseif nargin == 7
+    ShowProgress = logical(0);
+elseif nargin == 8
+    ShowProgress = logical(1);
+else
+    error('Error in calling SL0 function');
+end
+
+
+% Initialization
+%s = A\x;
+s = A_pinv*x;
+sigma = 2*max(abs(s));
+
+% Main Loop
+while sigma>sigma_min
+    for i=1:L
+        delta = OurDelta(s,sigma);
+        s = s - mu_0*delta;
+        s = s - A_pinv*(A*s-x);   % Projection onto {s : A*s = x}
+    end
+
+    if ShowProgress
+        fprintf('     sigma=%f, SNR=%f\n',sigma,estimate_SNR(s,true_s))
+    end
+
+    sigma = sigma * sigma_decrease_factor;
+end
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function delta=OurDelta(s,sigma)
+
+delta = s.*exp(-abs(s).^2/sigma^2);
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function SNR=estimate_SNR(estim_s,true_s)
+
+err = true_s - estim_s;
+SNR = 10*log10(sum(abs(true_s).^2)/sum(abs(err).^2));
\ No newline at end of file
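The header comments in SL0.m above describe the whole iteration: start from the minimum-norm solution s = A_pinv*x, then for a geometrically decreasing sequence of sigma run L steepest-ascent steps on the smoothed-L0 objective followed by a projection back onto {s : A*s = x}. Below is a minimal NumPy sketch of that loop for reference; it is not the pyCSalgos implementation, and the function name and the toy data at the end are illustrative only.

```python
import numpy as np

def sl0_sketch(A, x, sigma_min, sigma_decrease_factor=0.5, mu_0=2.0, L=3):
    """Minimal sketch of the smoothed-L0 iteration described in SL0.m."""
    A_pinv = np.linalg.pinv(A)
    s = A_pinv @ x                        # minimum-norm initial solution
    sigma = 2.0 * np.max(np.abs(s))       # first element of the sigma sequence
    while sigma > sigma_min:
        for _ in range(L):
            delta = s * np.exp(-np.abs(s) ** 2 / sigma ** 2)
            s = s - mu_0 * delta          # steepest-ascent step on the smoothed objective
            s = s - A_pinv @ (A @ s - x)  # project back onto {s : A s = x}
        sigma *= sigma_decrease_factor    # geometric decrease of sigma
    return s

# Tiny usage example on synthetic noiseless data
rng = np.random.default_rng(0)
A = rng.standard_normal((20, 50))
s_true = np.zeros(50)
s_true[:3] = rng.standard_normal(3)
x = A @ s_true
print(np.linalg.norm(sl0_sketch(A, x, sigma_min=1e-4) - s_true))
```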
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/SL0/SL0_approx.m	Sat Nov 05 21:15:02 2011 +0000
@@ -0,0 +1,158 @@
+function s=SL0_approx(A, x, eps, sigma_min, sigma_decrease_factor, mu_0, L, A_pinv, true_s)
+%
+% SL0_approx(A, x, eps, sigma_min, sigma_decrease_factor, mu_0, L, A_pinv, true_s)
+%
+% Returns the sparsest vector s which approximately satisfies the
+% underdetermined system of linear equations A*s=x (residual norm at most
+% eps), using the Smoothed L0 (SL0) algorithm. Note that
+% the matrix A should be a 'wide' matrix (more columns than rows). The
+% number of rows of matrix A should be equal to the length of the
+% column vector x.
+%
+% The first 4 arguments must be provided by the user. The
+% other parameters have default values calculated within the function, or
+% may be provided by the user.
+%
+% Sequence of Sigma (sigma_min and sigma_decrease_factor):
+%   This is a decreasing geometric sequence of positive numbers:
+%     - The first element of the sequence of sigma is calculated
+%     automatically. The last element is given by 'sigma_min', and the
+%     change factor for decreasing sigma is given by 'sigma_decrease_factor'.
+%     - The default value of 'sigma_decrease_factor' is 0.5. A larger value
+%     gives better results for less sparse sources, but it uses more steps
+%     on sigma to reach sigma_min, and hence requires higher
+%     computational cost.
+%     - There is no default value for 'sigma_min'; it should be
+%     provided by the user (depending on his/her estimated source noise
+%     level, or his/her desired accuracy). By 'noise' we mean here the
+%     noise in the sources, that is, the energy of the inactive elements of
+%     's'. For example, by the noiseless case, we mean that the inactive
+%     elements of 's' are exactly equal to zero. As a rule of thumb, for the
+%     noisy case, sigma_min should be about 2 to 4 times the standard
+%     deviation of this noise. For the noiseless case, a smaller 'sigma_min'
+%     results in a better estimation of the sparsest solution, and hence its
+%     value is determined by the desired accuracy.
+%
+% mu_0:
+%     The value of mu_0 scales the sequence of mu. For each value of
+%     sigma, the value of mu is chosen via mu=mu_0*sigma^2. Note that this
+%     value affects convergence.
+%     The default value is mu_0=2 (see the paper).
+%
+% L:
+%     number of iterations of the internal (steepest ascent) loop. The
+%     default value is L=3.
+%
+% A_pinv:
+%     is the pseudo-inverse of matrix A defined by A_pinv=A'*inv(A*A').
+%     If it is not provided, it will be calculated within the function. If
+%     you use this function for solving x(t)=A s(t) for different values of
+%     't', it would be a good idea to calculate A_pinv outside the function
+%     to prevent its re-calculation for each 't'.
+%
+% true_s:
+%     is the true value of the sparse solution. This argument is for
+%     simulation purposes. If it is provided by the user, the function
+%     will calculate the SNR of the estimation for each value of sigma and
+%     provide a progress report.
+%
+% Authors: Massoud Babaie-Zadeh and Hossein Mohimani
+% Version: 2.0
+% Last modified: 4 April 2010.
+%
+%
+% Web-page:
+% ------------------
+%    http://ee.sharif.ir/~SLzero
+%
+% Code History:
+%--------------
+% Version 2.0: 4 April 2010
+%   A few small modifications that enable the code to work also
+%   for complex numbers (not only for real numbers).
+%
+% Version 1.3: 13 Sep 2008
+%   Just a few modifications in the comments
+%
+% Version 1.2: Adding some more comments in the help section
+%
+% Version 1.1: 4 August 2008
+%   - Using MATLAB's pseudo-inverse function to generalize to the case
+%     where the matrix A is not full-rank.
+%
+% Version 1.0 (first official version): 4 July 2008.
+%
+% First non-official version and algorithm development: Summer 2006
+
+if nargin < 5
+    sigma_decrease_factor = 0.5;
+    A_pinv = pinv(A);
+    mu_0 = 2;
+    L = 3;
+    ShowProgress = logical(0);
+elseif nargin == 5
+    A_pinv = pinv(A);
+    mu_0 = 2;
+    L = 3;
+    ShowProgress = logical(0);
+elseif nargin == 6
+    A_pinv = pinv(A);
+    L = 3;
+    ShowProgress = logical(0);
+elseif nargin == 7
+    A_pinv = pinv(A);
+    ShowProgress = logical(0);
+elseif nargin == 8
+    ShowProgress = logical(0);
+elseif nargin == 9
+    ShowProgress = logical(1);
+else
+    error('Error in calling SL0_approx function');
+end
+
+
+% Initialization
+%s = A\x;
+%s = A_pinv*x;
+% Nic: start from a random point and a fixed initial sigma instead
+%s = zeros(size(A,2),1);
+s = randn(size(A,2),1);
+sigma = 10;
+%sigma = 2*max(abs(s));
+
+% Main Loop
+while sigma>sigma_min
+    for i=1:L
+        delta = OurDelta(s,sigma);
+        % Update s in the direction of steepest ascent
+        s = s - mu_0*delta;
+        % At this point, s no longer exactly satisfies x = A*s
+        % The original SL0 algorithm projects s onto {s | x = As} with
+        %s = s - A_pinv*(A*s-x);   % Projection
+        % We want to project s onto {s | norm(x-A*s) <= eps}
+        % We move in the direction -A_pinv*(A*s-x), but only with a
+        % smaller step:
+        dir = A_pinv*(A*s-x);
+        if norm(A*dir) >= eps
+            s = s - (1-eps/norm(A*dir)) * dir;
+        end
+        % The residual should not exceed eps (up to a small tolerance)
+        assert(norm(x - A*s) <= (1 + 1e-4)*eps)
+    end
+
+    if ShowProgress
+        fprintf('     sigma=%f, SNR=%f\n',sigma,estimate_SNR(s,true_s))
+    end
+
+    sigma = sigma * sigma_decrease_factor;
+end
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function delta=OurDelta(s,sigma)
+
+delta = s.*exp(-abs(s).^2/sigma^2);
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+function SNR=estimate_SNR(estim_s,true_s)
+
+err = true_s - estim_s;
+SNR = 10*log10(sum(abs(true_s).^2)/sum(abs(err).^2));
\ No newline at end of file
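The only algorithmic change in SL0_approx.m with respect to SL0.m is the projection: instead of projecting exactly onto {s : A*s = x}, the iterate moves along the same direction only far enough that the residual norm lands on eps. A small sketch of just that step, assuming A has full row rank so that ||A*d|| equals the current residual norm (the function name is illustrative):

```python
import numpy as np

def approx_projection_step(A, A_pinv, s, x, eps):
    """One approximate projection: stop on the boundary of ||x - A s|| <= eps."""
    d = A_pinv @ (A @ s - x)      # direction used by the exact projection
    r = np.linalg.norm(A @ d)     # equals ||A s - x|| when A has full row rank
    if r >= eps:
        # A full step (s - d) would drive the residual to ~0; a partial step
        # of relative length (1 - eps/r) leaves a residual of norm ~eps instead.
        s = s - (1.0 - eps / r) * d
    return s
```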
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/ABSapprox.py	Sat Nov 05 21:15:02 2011 +0000
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Nov 05 18:08:40 2011
+
+@author: Nic
+"""
+
+import numpy
+import pyCSalgos
+
+def gap_paramsetup(y, M, Omega, epsilon, lbd):
+    gapparams = dict(num_iteration = 1000,
+                     greedy_level = 0.9,
+                     stopping_coefficient_size = 1e-4,
+                     l2solver = 'pseudoinverse',
+                     noise_level = epsilon)
+    return y, M, M.T, Omega, Omega.T, gapparams, numpy.zeros(Omega.shape[1])
+
+def omp_paramsetup(y, M, Omega, epsilon, lbd):
+    # Placeholder: currently identical to the GAP setup
+    ompparams = dict(num_iteration = 1000,
+                     greedy_level = 0.9,
+                     stopping_coefficient_size = 1e-4,
+                     l2solver = 'pseudoinverse',
+                     noise_level = epsilon)
+    return y, M, M.T, Omega, Omega.T, ompparams, numpy.zeros(Omega.shape[1])
+
+gap = (pyCSalgos.GAP, gap_paramsetup)
+
+
+def mainrun():
+
+    # NOTE: the sl0 pair and the data (y, M, Omega, epsilon, lbd) are not defined yet
+    algos = (gap, sl0)
+
+    for algofunc, paramsetup in algos:
+        xrec = algofunc(*paramsetup(y, M, Omega, epsilon, lbd))
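The script above registers each reconstruction algorithm as a (solver, parameter-setup) pair and loops over them. A hypothetical driver for that pattern is sketched below; pyCSalgos.GAP and gap_paramsetup come from the script, while run_all and the data arguments are illustrative assumptions, and the tuple returned by the setup function is unpacked straight into the solver's argument list.

```python
# Hypothetical driver for the (solver, param_setup) pairs defined in ABSapprox.py.
def run_all(algos, y, M, Omega, epsilon, lbd):
    results = {}
    for solver, param_setup in algos:
        args = param_setup(y, M, Omega, epsilon, lbd)  # build the solver's argument tuple
        results[solver.__name__] = solver(*args)       # e.g. GAP(y, M, M.T, Omega, Omega.T, params, x0)
    return results
```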
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/study_analysis_rec_approx.m	Sat Nov 05 21:15:02 2011 +0000
@@ -0,0 +1,363 @@
+% File: study_analysis_rec_algos
+% Run experiment to show that our approx. ABS approach really converges to
+% the analysis solution as lambda -> infty
+% For this, we need to generate signals that, provably, yield bad results
+% with synthesis recovery, and good results with analysis recovery
+% To do this we choose N >> d, small l, and m close to d
+
+clear all
+close all
+
+% =================================
+% Set up experiment parameters
+%==================================
+% Which form factor, delta and rho we want
+sigma = 2;
+%delta = 0.995;
+%rho = 0.7;
+%delta = 0.8;
+%rho = 0.15;
+delta = 0.5;
+rho = 0.05;
+
+% Number of vectors to generate each time
+numvects = 10;
+
+% Add noise
+% This is norm(signal)/norm(noise), so power, not energy
+SNRdb = 20;
+%epsextrafactor = 2
+
+% =================================
+% Processing the parameters
+%==================================
+
+% Compute noiselevel from dB
+noiselevel = 1 / (10^(SNRdb/10));
+
+% Set up parameter structure
+% and generate data X as well
+d = 50;
+p = round(sigma*d);
+m = round(delta*d);
+l = round(d - rho*m);
+
+% Generate Omega and data based on parameters
+Omega = Generate_Analysis_Operator(d, p);
+% Make Omega more coherent
+[U, S, V] = svd(Omega);
+%Sdnew = diag(S) ./ (1:numel(diag(S)))';
+Sdnew = diag(S) .* (1:numel(diag(S)))'; % Make D coherent, not Omega!
+Snew = [diag(Sdnew); zeros(size(S,1) - size(S,2), size(S,2))];
+Omega = U * Snew * V';
+[x0,y,M,Lambda, realnoise] = Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
+
+datafilename = 'mat/data_SNR20';
+save(datafilename);
+%load mat/data_SNR20
+
+% Values of lambda
+%lambdas = sqrt([1e-10 1e-8 1e-6 1e-4 1e-2 1 100 1e4 1e6 1e8 1e10]);
+lambdas = [0 10.^linspace(-5, 4, 10)];
+%lambdas = 1000
+%lambdas = sqrt([0 1 10000]);
+
+% Algorithm identifiers
+algonone = [];
+ompk = 1;
+ompeps = 2;
+tst = 3;
+bp = 4;
+gap = 5;
+nesta = 6;
+sl0 = 7;
+yall1 = 8;
+spgl1 = 9;
+nestasynth = 10;
+numallalgos = 10;
+algoname{ompk} = 'ABS OMPk';
+algoname{ompeps} = 'ABS OMPeps';
+algoname{tst} = 'ABS TST';
+algoname{bp} = 'ABS BP';
+algoname{gap} = 'GAP';
+algoname{nesta} = 'NESTA Analysis';
+algoname{sl0} = 'ABS SL0';
+algoname{yall1} = 'ABS YALL1';
+algoname{spgl1} = 'ABS SPGL1';
+algoname{nestasynth} = 'NESTA Synth';
+
+% What algos to run
+%algos = [gap, ompk, ompeps, tst, bp, sl0, yall1];
+%algos = [gap, ompk, ompeps, tst, bp];
+algos = [gap, sl0];
+numalgos = numel(algos);
+
+% Save mat file?
+do_save_mat = 0;
+matfilename = 'mat/approx_d50_sigma2_SNR20db_Dcoh';
+
+% Save figures?
+do_save_figs = 0;
+figfilename = 'figs/approx_d50_sigma2_SNR20db_Dcoh';
+
+% Show progressbar? (not recommended when running on parallel threads)
+do_progressbar = 0;
+if do_progressbar
+    progressbar('Total', 'Current parameters');
+end
+
+% Init times
+for i = 1:numalgos
+    elapsed(algos(i)) = 0;
+end
+
+% Init results structure
+results = [];
+
+
+% ========
+% Run
+% ========
+
+% Run GAP and NESTA first
+if any(algos == gap)
+    for iy = 1:size(y,2)
+        a = gap;
+        % Compute epsilon
+        %epsilon = epsextrafactor * noiselevel * norm(y(:,iy));
+        epsilon = 1.1 * norm(realnoise(:,iy));
+        %
+        gapparams = [];
+        gapparams.num_iteration = 1000;
+        gapparams.greedy_level = 0.9;
+        gapparams.stopping_coefficient_size = 1e-4;
+        gapparams.l2solver = 'pseudoinverse';
+        %gapparams.noise_level = noiselevel;
+        gapparams.noise_level = epsilon;
+        timer(a) = tic;
+        xrec{a}(:,iy) = GAP(y(:,iy), M, M', Omega, Omega', gapparams, zeros(d,1));
+        elapsed(a) = elapsed(a) + toc(timer(a));
+        %
+        err{a}(iy) = norm(x0(:,iy) - xrec{a}(:,iy));
+        relerr{a}(iy) = err{a}(iy) / norm(x0(:,iy));
+    end
+    disp([ algoname{a} ': avg relative error = ' num2str(mean(relerr{a}))]);
+end
+if any(algos == nesta)
+    for iy = 1:size(y,2)
+        a = nesta;
+        % Compute epsilon
+        %epsilon = epsextrafactor * noiselevel * norm(y(:,iy));
+        epsilon = 1.1 * norm(realnoise(:,iy));
+        %
+        try
+            timer(a) = tic;
+            %xrec{a}(:,iy) = do_nesta_DemoNonProjector(x0(:,iy), M, Omega', 0);
+
+            % Common heuristic: delta = sqrt(m + 2*sqrt(2*m))*sigma ?????????
+            [U,S,V] = svd(M,'econ');
+            opts.U = Omega;
+            opts.Ut = Omega';
+            opts.USV.U=U;
+            opts.USV.S=S;
+            opts.USV.V=V;
+            opts.TolVar = 1e-5;
+            opts.Verbose = 0;
+            xrec{a}(:,iy) = NESTA(M, [], y(:,iy), 1e-3, epsilon, opts);
+            elapsed(a) = elapsed(a) + toc(timer(a));
+        catch nestaerr  % do not shadow the err cell array
+            disp('*****ERROR: NESTA threw an error *****');
+            xrec{a}(:,iy) = zeros(size(x0(:,iy)));
+        end
+        %
+        err{a}(iy) = norm(x0(:,iy) - xrec{a}(:,iy));
+        relerr{a}(iy) = err{a}(iy) / norm(x0(:,iy));
+    end
+    disp([ algoname{a} ': avg relative error = ' num2str(mean(relerr{a}))]);
+end
+
+for i = 1:numel(algos)
+    if algos(i) == gap
+        continue;
+    end
+    prevgamma{algos(i)} = zeros(p, numvects);
+end
+
+% Run ABS algorithms (lambda-dependent)
+for iparam = 1:numel(lambdas)
+
+    % Read current parameters
+    lambda = lambdas(iparam);
+
+    % Init stuff
+    for i = 1:numel(algos)
+        if algos(i) == gap || algos(i) == nesta
+            continue;
+        end
+        xrec{algos(i)} = zeros(d, numvects);
+    end
+    %
+    for i = 1:numel(algos)
+        if algos(i) == gap || algos(i) == nesta
+            continue;
+        end
+        err{algos(i)} = zeros(numvects,1);
+        relerr{algos(i)} = zeros(numvects,1);
+    end
+
+    % For every generated signal do
+    for iy = 1:size(y,2)
+
+        % Compute epsilon
+        %epsilon = epsextrafactor * noiselevel * norm(y(:,iy));
+        epsilon = 1.1 * norm(realnoise(:,iy));
+
+        %--------------------------------
+        % Reconstruct (and measure delay), compute reconstruction error
+        %--------------------------------
+        for i = 1:numel(algos)
+            a = algos(i);
+            if a == gap || a == nesta
+                continue
+            end
+            if a == ompk
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_OMPk_approx(y(:,iy), Omega, M, p-l, lambda);
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            elseif a == ompeps
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_OMPeps_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * x0(:,iy));
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            elseif a == tst
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_TST_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * x0(:,iy));
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            elseif a == bp
+                timer(a) = tic;
+                [xrec{a}(:,iy), prevgamma{a}(:,iy)] = ABS_BP_approx(y(:,iy), Omega, M, epsilon, lambda, prevgamma{a}(:,iy), Omega * x0(:,iy));
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            elseif a == yall1
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_YALL1_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * x0(:,iy));
+                elapsed(a) = elapsed(a) + toc(timer(a));
+%             elseif a == gap
+%                 gapparams = [];
+%                 gapparams.num_iteration = 40;
+%                 gapparams.greedy_level = 0.9;
+%                 gapparams.stopping_coefficient_size = 1e-4;
+%                 gapparams.l2solver = 'pseudoinverse';
+%                 %gapparams.noise_level = noiselevel;
+%                 gapparams.noise_level = epsilon;
+%                 timer(a) = tic;
+%                 xrec{a}(:,iy) = GAP(y(:,iy), M, M', Omega, Omega', gapparams, zeros(d,1));
+%                 elapsed(a) = elapsed(a) + toc(timer(a));
+%             elseif a == nesta
+%                 try
+%                     timer(a) = tic;
+%                     %xrec{a}(:,iy) = do_nesta_DemoNonProjector(x0(:,iy), M, Omega', 0);
+%
+%                     % Common heuristic: delta = sqrt(m + 2*sqrt(2*m))*sigma ?????????
+%                     [U,S,V] = svd(M,'econ');
+%                     opts.U = Omega;
+%                     opts.Ut = Omega';
+%                     opts.USV.U=U;
+%                     opts.USV.S=S;
+%                     opts.USV.V=V;
+%                     opts.TolVar = 1e-5;
+%                     opts.Verbose = 0;
+%                     xrec{a}(:,iy) = NESTA(M, [], y(:,iy), 1e-3, epsilon, opts);
+%                     elapsed(a) = elapsed(a) + toc(timer(a));
+%                 catch nestaerr
+%                     disp('*****ERROR: NESTA threw an error *****');
+%                     xrec{a}(:,iy) = zeros(size(x0(:,iy)));
+%                 end
+            elseif a == sl0
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_SL0_approx(y(:,iy), Omega, M, epsilon, lambda);
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            elseif a == spgl1
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_SPGL1_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * xrec{bp}(:,iy));
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            elseif a == nestasynth
+                timer(a) = tic;
+                xrec{a}(:,iy) = ABS_NESTAsynth_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * xrec{bp}(:,iy));
+                elapsed(a) = elapsed(a) + toc(timer(a));
+            else
+                %error('No such algorithm!');
+            end
+            %
+            % Compare to GAP instead!
+            %x0(:,iy) = xrec{gap}(:,iy);
+            %
+            err{a}(iy) = norm(x0(:,iy) - xrec{a}(:,iy));
+            relerr{a}(iy) = err{a}(iy) / norm(x0(:,iy));
+        end
+
+        % Update progressbar
+%         if do_progressbar
+%             %frac2 = iy/numvects;
+%             %frac1 = ((iparam-1) + frac2) / count;
+%             if norm(frac2 - 1) < 1e-6
+%                 frac2 = 0;
+%             end
+%             frac2 = frac2 + incr2;
+%             frac1 = frac1 + incr1;
+%             progressbar(frac1, frac2);
+%         end
+    end
+
+    %--------------------------------
+    % Save results in big structure & display
+    %--------------------------------
+    % Save reconstructed signals
+    % Save rel & abs errors
+    % Display error
+    results(iparam).xrec = xrec;
+    results(iparam).err = err;
+    results(iparam).relerr = relerr;
+    %
+    %disp(['Simulation no. ' num2str(iparam)]);
+    disp(['Lambda = ' num2str(lambda) ':']);
+    for i = 1:numalgos
+        a = algos(i);
+        if a == gap || a == nesta
+            continue
+        end
+        disp([ algoname{a} ': avg relative error = ' num2str(mean(relerr{a}))]);
+    end
+end
+
+% =================================
+% Save
+% =================================
+if do_save_mat
+    save(matfilename);
+    disp(['Saved to ' matfilename]);
+end
+
+% =================================
+% Plot
+% =================================
+toplot = zeros(numel(lambdas), numalgos);
+
+relerrs = {results.relerr};
+for i = 1:numalgos
+    for j = 1:numel(lambdas)
+        toplot(j,i) = mean(relerrs{j}{algos(i)});
+    end
+end
+
+%h = plot(toplot);
+h = semilogx(lambdas, toplot);
+legend(algoname{algos})
+xlabel('Lambda')
+ylabel('Average reconstruction error')
+title('Reconstruction error with different algorithms')
+
+if (do_save_figs)
+    saveas(gcf, [figfilename '.fig']);
+    saveas(gcf, [figfilename '.png']);
+    saveTightFigure(gcf, [figfilename '.pdf']);
+end
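The experiment script's bookkeeping is simple: the noise level follows from the SNR in dB (a power ratio), epsilon is 1.1 times the norm of the realized noise on each measurement vector, and the reported figure is the mean relative reconstruction error per algorithm and per lambda. A small NumPy sketch of those three formulas, with illustrative function names:

```python
import numpy as np

def noise_level_from_snr_db(snr_db):
    # SNR is a power ratio: 20 dB means the noise power is 1/100 of the signal power
    return 10.0 ** (-snr_db / 10.0)

def epsilon_from_noise(realnoise_col):
    # Slight over-estimate of the realized noise norm, as in the script (factor 1.1)
    return 1.1 * np.linalg.norm(realnoise_col)

def mean_relative_error(x0, xrec):
    # x0, xrec: (d, numvects) arrays of true and reconstructed signals
    rel = np.linalg.norm(x0 - xrec, axis=0) / np.linalg.norm(x0, axis=0)
    return rel.mean()
```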
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/sl0_test.py	Sat Nov 05 21:15:02 2011 +0000
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Nov 05 20:29:02 2011
+Test SL0 algorithm
+@author: Nic
+"""
+
+
+import numpy as np
+import numpy.linalg
+import scipy.io
+import unittest
+from pyCSalgos.SL0.SL0 import SL0
+
+class SL0results(unittest.TestCase):
+    def testResults(self):
+        mdict = scipy.io.loadmat('SL0testdata.mat')
+
+        # A = system matrix
+        # Y = matrix with measurements (on columns)
+        # sigmamin = vector with sigma_min
+        for A, Y, sigmamin, Xr in zip(mdict['cellA'].squeeze(), mdict['cellY'].squeeze(), mdict['sigmamin'].squeeze(), mdict['cellXr'].squeeze()):
+            for i in np.arange(Y.shape[1]):
+
+                # Fix numpy error "LapackError: Parameter a has non-native byte order in lapack_lite.dgesdd"
+                A = A.newbyteorder('=')
+                Y = Y.newbyteorder('=')
+                sigmamin = sigmamin.newbyteorder('=')
+                Xr = Xr.newbyteorder('=')
+
+                xr = SL0(A, Y[:,i], sigmamin)
+
+                # check if the found solution is the same as the correct solution
+                diff = numpy.linalg.norm(xr - Xr[:,i])
+                self.assertTrue(diff < 1e-12)
+#                err1 = numpy.linalg.norm(Y[:,i] - np.dot(A,xr))
+#                err2 = numpy.linalg.norm(Y[:,i] - np.dot(A,Xr[:,i]))
+#                norm1 = numpy.linalg.norm(xr,1)
+#                norm2 = numpy.linalg.norm(Xr[:,i],1)
+#
+#                # Make a more robust condition:
+#                #   OK: if solutions are close enough (diff < 1e-6)
+#                #       or
+#                #       (
+#                #         Python solution fulfills the constraint better (or up to 1e-6 worse)
+#                #         and
+#                #         Python solution has an l1 norm no more than 1e-6 larger than the reference solution
+#                #         (i.e. either norm1 < norm2, or norm1 > norm2 by not more than 1e-6)
+#                #       )
+#                #
+#                #   ERROR: else
+#                differr = err1 - err2    # intentionally no abs(), since err1 < err2 is good
+#                diffnorm = norm1 - norm2 # intentionally no abs(), since norm1 < norm2 is good
+#                if diff < 1e-6 or (differr < 1e-6 and (diffnorm < 1e-6)):
+#                    isok = True
+#                else:
+#                    isok = False
+#                self.assertTrue(isok)
+
+                #diff = numpy.linalg.norm(xr - Xr[:,i])
+                #if diff > 1e-6:
+                #    self.assertTrue(diff < 1e-6)
+
+
+if __name__ == "__main__":
+    #import cProfile
+    #cProfile.run('unittest.main()', 'profres')
+    unittest.main()
+    #suite = unittest.TestLoader().loadTestsFromTestCase(CompareResults)
+    #unittest.TextTestRunner(verbosity=2).run(suite)
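The commented-out block in the test describes a more tolerant acceptance criterion than the strict 1e-12 comparison: accept when the two solutions coincide, or when the Python solution fits the measurements and the l1 objective at least as well as the MATLAB reference, up to 1e-6. A standalone sketch of that check, with a hypothetical helper name not present in the test suite:

```python
import numpy as np

def solutions_agree(A, y, xr, x_ref, tol=1e-6):
    """Accept xr if it matches x_ref, or fits the data and the l1 objective
    at least as well up to tol (hypothetical helper, not part of sl0_test.py)."""
    diff = np.linalg.norm(xr - x_ref)
    err1 = np.linalg.norm(y - A @ xr)     # data misfit of the Python solution
    err2 = np.linalg.norm(y - A @ x_ref)  # data misfit of the reference solution
    norm1 = np.linalg.norm(xr, 1)
    norm2 = np.linalg.norm(x_ref, 1)
    return diff < tol or (err1 - err2 < tol and norm1 - norm2 < tol)
```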