annotate pyCSalgos/SL0/SL0.py @ 60:a7082bb22644

Modified structure: should keep in this repo only the bare solvers. Anything related to Analysis should be moved to the ABS repository. Renamed RecomTST to TST, renamed many functions, removed Analysis.py and algos.py.
author Nic Cleju <nikcleju@gmail.com>
date Fri, 09 Mar 2012 16:25:31 +0200
parents f31d5484a573
children
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 05 18:39:54 2011

@author: Nic
"""

import numpy as np

#function s=SL0(A, x, sigma_min, sigma_decrease_factor, mu_0, L, A_pinv, true_s)
def SL0(A, x, sigma_min, sigma_decrease_factor=0.5, mu_0=2, L=3, A_pinv=None, true_s=None):
    #
    # SL0(A, x, sigma_min, sigma_decrease_factor, mu_0, L, A_pinv, true_s)
    #
    # Returns the sparsest vector s which satisfies the underdetermined system
    # of linear equations A*s=x, using the Smoothed L0 (SL0) algorithm. Note
    # that the matrix A should be a 'wide' matrix (more columns than rows).
    # The number of rows of matrix A should be equal to the length of the
    # column vector x.
    #
    # The first 3 arguments must be provided by the user. The other parameters
    # have default values calculated within the function, or may be provided
    # by the user. (A usage sketch is given at the end of this file.)
    #
    # Sequence of sigma (sigma_min and sigma_decrease_factor):
    #   This is a decreasing geometric sequence of positive numbers:
    #     - The first element of the sequence of sigma is calculated
    #       automatically. The last element is given by 'sigma_min', and the
    #       change factor for decreasing sigma is given by
    #       'sigma_decrease_factor'.
    #     - The default value of 'sigma_decrease_factor' is 0.5. A larger
    #       value gives better results for less sparse sources, but it uses
    #       more steps on sigma to reach sigma_min, and hence requires a
    #       higher computational cost. (A note on the resulting number of
    #       sigma steps is given after the original Matlab listing below.)
    #     - There is no default value for 'sigma_min'; it should be provided
    #       by the user (depending on his/her estimated source noise level,
    #       or his/her desired accuracy). By 'noise' we mean here the noise
    #       in the sources, that is, the energy of the inactive elements of
    #       's'. For example, by the noiseless case we mean that the inactive
    #       elements of 's' are exactly equal to zero. As a rule of thumb, for
    #       the noisy case, sigma_min should be about 2 to 4 times the
    #       standard deviation of this noise. For the noiseless case, a
    #       smaller 'sigma_min' results in a better estimation of the
    #       sparsest solution, and hence its value is determined by the
    #       desired accuracy.
    #
    # mu_0:
    #   The value of mu_0 scales the sequence of mu. For each value of sigma,
    #   the value of mu is chosen via mu = mu_0*sigma^2. Note that this value
    #   affects convergence.
    #   The default value is mu_0 = 2 (see the paper).
    #
    # L:
    #   Number of iterations of the internal (steepest ascent) loop. The
    #   default value is L = 3.
    #
    # A_pinv:
    #   The pseudo-inverse of matrix A, defined by A_pinv = A'*inv(A*A').
    #   If it is not provided, it will be calculated within the function. If
    #   you use this function for solving x(t) = A s(t) for different values
    #   of 't', it is a good idea to calculate A_pinv outside the function to
    #   prevent its re-calculation for each 't'.
    #
    # true_s:
    #   The true value of the sparse solution. This argument is for
    #   simulation purposes. If it is provided by the user, the function will
    #   calculate the SNR of the estimation for each value of sigma and
    #   provide a progress report.
    #
    # Authors: Massoud Babaie-Zadeh and Hossein Mohimani
    # Version: 1.4
    # Last modified: 4 April 2010.
    #
    #
    # Web-page:
    # ------------------
    # http://ee.sharif.ir/~SLzero
    #
    # Code History:
    #--------------
    # Version 2.0: 4 April 2010
    #   Doing a few small modifications that enable the code to work also
    #   for complex numbers (not only for real numbers).
    #
    # Version 1.3: 13 Sep 2008
    #   Just a few modifications in the comments
    #
    # Version 1.2: Adding some more comments in the help section
    #
    # Version 1.1: 4 August 2008
    #   - Using MATLAB's pseudo-inverse function to generalize to the case
    #     where the matrix A is not full-rank.
    #
    # Version 1.0 (first official version): 4 July 2008.
    #
    # First non-official version and algorithm development: Summer 2006
    #---------------------
    # Original Matlab code:
    #
    #if nargin < 4
    #    sigma_decrease_factor = 0.5;
    #    A_pinv = pinv(A);
    #    mu_0 = 2;
    #    L = 3;
    #    ShowProgress = logical(0);
    #elseif nargin == 4
    #    A_pinv = pinv(A);
    #    mu_0 = 2;
    #    L = 3;
    #    ShowProgress = logical(0);
    #elseif nargin == 5
    #    A_pinv = pinv(A);
    #    L = 3;
    #    ShowProgress = logical(0);
    #elseif nargin == 6
    #    A_pinv = pinv(A);
    #    ShowProgress = logical(0);
    #elseif nargin == 7
    #    ShowProgress = logical(0);
    #elseif nargin == 8
    #    ShowProgress = logical(1);
    #else
    #    error('Error in calling SL0 function');
    #end
    #
    #
    ## Initialization
    ##s = A\x;
    #s = A_pinv*x;
    #sigma = 2*max(abs(s));
    #
    ## Main Loop
    #while sigma>sigma_min
    #    for i=1:L
    #        delta = OurDelta(s,sigma);
    #        s = s - mu_0*delta;
    #        s = s - A_pinv*(A*s-x);   # Projection
    #    end
    #
    #    if ShowProgress
    #        fprintf('     sigma=%f, SNR=%f\n',sigma,estimate_SNR(s,true_s))
    #    end
    #
    #    sigma = sigma * sigma_decrease_factor;
    #end
    #
    #
    #####################################################################
    #function delta=OurDelta(s,sigma)
    #
    #delta = s.*exp(-abs(s).^2/sigma^2);
    #
    #####################################################################
    #function SNR=estimate_SNR(estim_s,true_s)
    #
    #err = true_s - estim_s;
    #SNR = 10*log10(sum(abs(true_s).^2)/sum(abs(err).^2));

    # End of original Matlab code
    #----------------------------
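
    # Note on the sigma schedule (added commentary, not part of the original
    # SL0 code): sigma starts at sigma_0 = 2*max(|A_pinv*x|) and is multiplied
    # by sigma_decrease_factor until it drops to sigma_min or below, so the
    # outer loop runs roughly
    #     ceil( log(sigma_min / sigma_0) / log(sigma_decrease_factor) )
    # times. For example, sigma_0 = 1, sigma_min = 1e-3 and the default
    # factor 0.5 give about 10 outer iterations, each performing L
    # gradient-plus-projection steps.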

    if A_pinv is None:
        A_pinv = np.linalg.pinv(A)

    if true_s is not None:
        ShowProgress = True
    else:
        ShowProgress = False

    # Initialization
    #s = A\x;
    s = np.dot(A_pinv, x)
    sigma = 2.0 * np.abs(s).max()

    # Main Loop
    while sigma > sigma_min:
        for i in np.arange(L):
            delta = OurDelta(s, sigma)
            s = s - mu_0 * delta
            s = s - np.dot(A_pinv, (np.dot(A, s) - x))  # Projection back onto A*s = x

        if ShowProgress:
            #fprintf('     sigma=%f, SNR=%f\n',sigma,estimate_SNR(s,true_s))
            print('     sigma=%f, SNR=%f' % (sigma, estimate_SNR(s, true_s)))

        sigma = sigma * sigma_decrease_factor

    return s


####################################################################
#function delta=OurDelta(s,sigma)
def OurDelta(s, sigma):

    return s * np.exp((-np.abs(s)**2) / sigma**2)
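
# Commentary added here (not part of the original SL0 code): with the smoothed
# sparsity measure F_sigma(s) = sum_i exp(-|s_i|^2 / sigma^2), the gradient is
# dF/ds_i = -(2/sigma^2) * s_i * exp(-|s_i|^2 / sigma^2), so OurDelta(s, sigma)
# equals -(sigma^2/2) * grad F_sigma(s). The update s = s - mu_0*delta in the
# main loop is therefore a steepest-ascent step on F_sigma of size
# mu_0*sigma^2/2, which matches (up to a constant factor) the mu = mu_0*sigma^2
# rule described in the help text above.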

####################################################################
#function SNR=estimate_SNR(estim_s,true_s)
def estimate_SNR(estim_s, true_s):

    # SNR (in dB) of the estimate, as in the original Matlab code; np.abs
    # keeps the formula valid for complex-valued signals as well.
    err = true_s - estim_s
    return 10 * np.log10((np.abs(true_s)**2).sum() / (np.abs(err)**2).sum())
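

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original SL0 code).
# It builds a small random underdetermined system x = A*s0 with a k-sparse s0,
# precomputes the pseudo-inverse once as recommended in the help text above,
# and recovers s0 in the noiseless setting with a small sigma_min. The sizes
# and the sigma_min value below are arbitrary choices for the demo.
if __name__ == "__main__":

    np.random.seed(0)
    n, N, k = 20, 50, 3                      # rows, columns, sparsity level

    A = np.random.randn(n, N)                # 'wide' measurement matrix
    s0 = np.zeros(N)
    s0[np.random.permutation(N)[:k]] = np.random.randn(k)   # k-sparse source
    x = np.dot(A, s0)                        # noiseless measurements

    A_pinv = np.linalg.pinv(A)               # precompute once, reuse for many x
    s_hat = SL0(A, x, sigma_min=1e-4, sigma_decrease_factor=0.5,
                mu_0=2, L=3, A_pinv=A_pinv, true_s=s0)

    print('Relative error: %g' % (np.linalg.norm(s_hat - s0) / np.linalg.norm(s0)))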