changeset 4:4393ad5bffc1
Implemented the RecommendedTST algorithm
Test file written, but test not yet working.
author:   nikcleju
date:     Mon, 24 Oct 2011 23:39:53 +0000
parents:  537f7798e186
children: 4a4e5204ecf5
files:    matlab/RecomTST/RecommendedTST.m pyCSalgos/RecomTST/RecommendedTST.py pyCSalgos/RecomTST/__init__.py tests/RecomTST_test.py tests/RecomTSTgentest.m tests/RecomTSTtestdata.mat tests/l1qc_test.py
diffstat: 6 files changed, 343 insertions(+), 4 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/matlab/RecomTST/RecommendedTST.m	Mon Oct 24 23:39:53 2011 +0000
@@ -0,0 +1,82 @@
+function beta = RecommendedTST(X,Y, nsweep,tol,xinitial,ro)
+
+% function beta=RecommendedTST(X,Y, nsweep,tol,xinitial,ro)
+% This function gets the measurement matrix, the measurements and the
+% number of runs, and applies the TST algorithm with optimally tuned
+% parameters to the problem. For more information you may refer to the paper
+% "Optimally tuned iterative reconstruction algorithms for compressed
+% sensing," by Arian Maleki and David L. Donoho.
+% X        : measurement matrix; we assume that all the columns have
+%            almost equal $\ell_2$ norms. The tuning has been done on
+%            matrices with unit column norm.
+% Y        : measurement vector
+% nsweep   : number of iterations. The default value is 300.
+% tol      : the algorithm stops when the relative prediction error
+%            ||Y-Ax||_2 / ||Y||_2 drops below tol; otherwise it runs for
+%            nsweep iterations. The default value is 0.00001.
+% xinitial : optional initialization of the algorithm. All the results
+%            mentioned in the paper used initialization at the zero
+%            vector, which is the default. You can enter 0 here to get
+%            the default.
+% ro       : optional parameter specifying the sparsity level. If not
+%            given, the algorithm uses the default optimally tuned
+%            value; passing ro=0 also selects the default.
+%
+% Outputs:
+% beta     : the estimated coefficients.
+%
+% References:
+% For more information about this algorithm and the papers about related
+% algorithms like CoSaMP and SP, please refer to the paper mentioned
+% above and the references therein.
+
+
+colnorm=mean((sum(X.^2)).^(.5));
+X=X./colnorm;
+Y=Y./colnorm;
+[n,p]=size(X);
+delta=n/p;
+if nargin<3
+    nsweep=300;
+end
+if nargin<4
+    tol=0.00001;
+end
+if nargin<5 | xinitial==0
+    xinitial = zeros(p,1);
+end
+if nargin<6 | ro==0
+    ro=0.044417*delta^2+ 0.34142*delta+0.14844;
+end
+
+k1=floor(ro*n);
+k2=floor(ro*n);
+
+% initialization
+x1=xinitial;
+I=[];
+
+for sweep=1:nsweep
+    r=Y-X*x1;
+    c=X'*r;
+    [csort, i_csort]=sort(abs(c));
+    I=union(I,i_csort(end-k2+1:end));
+    xt = X(:,I) \ Y;
+    [xtsort, i_xtsort]=sort(abs(xt));
+
+    J=I(i_xtsort(end-k1+1:end));
+    x1=zeros(p,1);
+    x1(J)=xt(i_xtsort(end-k1+1:end));
+    I=J;
+    if norm(Y-X*x1)/norm(Y)<tol
+        break
+    end
+end
+
+beta=x1;
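To make the tuned default concrete: for the 50x100 matrix size used first in RecomTSTgentest.m below, the formula gives ro of roughly 0.33, so each sweep keeps k1 = k2 = 16 columns. A quick check (illustrative Python, not part of the changeset):

import math

n, p = 50, 100
delta = float(n) / p                                # undersampling ratio: 0.5
ro = 0.044417*delta**2 + 0.34142*delta + 0.14844    # tuned formula: ~0.33025
k = int(math.floor(ro * n))                         # floor(ro*n) = 16
print(delta, ro, k)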
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyCSalgos/RecomTST/RecommendedTST.py	Mon Oct 24 23:39:53 2011 +0000
@@ -0,0 +1,152 @@
+
+import numpy as np
+import math
+
+#function beta = RecommendedTST(X,Y, nsweep,tol,xinitial,ro)
+def RecommendedTST(X, Y, nsweep=300, tol=0.00001, xinitial=None, ro=0):
+
+    # This function gets the measurement matrix and the measurements and
+    # applies the TST algorithm with optimally tuned parameters to the
+    # problem. For more information you may refer to the paper
+    # "Optimally tuned iterative reconstruction algorithms for compressed
+    # sensing," by Arian Maleki and David L. Donoho.
+    # X        : measurement matrix; we assume that all the columns have
+    #            almost equal l2 norms. The tuning has been done on
+    #            matrices with unit column norm.
+    # Y        : measurement vector
+    # nsweep   : number of iterations. The default value is 300.
+    # tol      : the algorithm stops when the relative prediction error
+    #            ||Y-Ax||_2 / ||Y||_2 drops below tol; otherwise it runs
+    #            for nsweep iterations. The default value is 0.00001.
+    # xinitial : optional initialization of the algorithm. All the results
+    #            mentioned in the paper used initialization at the zero
+    #            vector, which is the default (None).
+    # ro       : optional parameter specifying the sparsity level. If not
+    #            given (ro=0), the algorithm uses the default optimally
+    #            tuned value.
+    #
+    # Outputs:
+    # beta     : the estimated coefficients.
+    #
+    # References:
+    # For more information about this algorithm and the papers about
+    # related algorithms like CoSaMP and SP, please refer to the paper
+    # mentioned above and the references therein.
+    #
+    #----------------------
+    # Original Matlab code:
+    #
+    #colnorm=mean((sum(X.^2)).^(.5));
+    #X=X./colnorm;
+    #Y=Y./colnorm;
+    #[n,p]=size(X);
+    #delta=n/p;
+    #if nargin<3
+    #    nsweep=300;
+    #end
+    #if nargin<4
+    #    tol=0.00001;
+    #end
+    #if nargin<5 | xinitial==0
+    #    xinitial = zeros(p,1);
+    #end
+    #if nargin<6 | ro==0
+    #    ro=0.044417*delta^2+ 0.34142*delta+0.14844;
+    #end
+    #
+    #k1=floor(ro*n);
+    #k2=floor(ro*n);
+    #
+    #%initialization
+    #x1=xinitial;
+    #I=[];
+    #
+    #for sweep=1:nsweep
+    #    r=Y-X*x1;
+    #    c=X'*r;
+    #    [csort, i_csort]=sort(abs(c));
+    #    I=union(I,i_csort(end-k2+1:end));
+    #    xt = X(:,I) \ Y;
+    #    [xtsort, i_xtsort]=sort(abs(xt));
+    #
+    #    J=I(i_xtsort(end-k1+1:end));
+    #    x1=zeros(p,1);
+    #    x1(J)=xt(i_xtsort(end-k1+1:end));
+    #    I=J;
+    #    if norm(Y-X*x1)/norm(Y)<tol
+    #        break
+    #    end
+    #end
+    #
+    #beta=x1;
+    #
+    # End of original Matlab code
+    #----------------------------
+
+    colnorm = np.mean(np.sqrt((X**2).sum(0)))
+    X = X / colnorm
+    Y = Y / colnorm
+    n, p = X.shape
+    delta = float(n) / p
+    if xinitial is None:
+        xinitial = np.zeros(p)
+    if ro == 0:
+        ro = 0.044417*delta**2 + 0.34142*delta + 0.14844
+
+    k1 = int(math.floor(ro*n))
+    k2 = int(math.floor(ro*n))
+
+    # initialization
+    x1 = xinitial.copy()
+    I = []
+
+    for sweep in np.arange(nsweep):
+        r = Y - np.dot(X, x1)
+        c = np.dot(X.T, r)
+        i_csort = np.argsort(np.abs(c))
+        I = np.union1d(I, i_csort[-k2:])
+        # Matlab: xt = X(:,I) \ Y  (least-squares solution on the support)
+        # Make sure X[:,np.int_(I)] is a 2-dimensional matrix even if I
+        # has a single value (which would otherwise yield a 1-d array)
+        if I.size == 1:
+            a = np.reshape(X[:, np.int_(I)], (X.shape[0], 1))
+        else:
+            a = X[:, np.int_(I)]
+        xt = np.linalg.lstsq(a, Y)[0]
+        i_xtsort = np.argsort(np.abs(xt))
+
+        J = I[i_xtsort[-k1:]]
+        x1 = np.zeros(p)
+        x1[np.int_(J)] = xt[i_xtsort[-k1:]]
+        I = J.copy()
+        if np.linalg.norm(Y - np.dot(X, x1)) / np.linalg.norm(Y) < tol:
+            break
+
+    return x1.copy()
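A minimal usage sketch of the Python port on a synthetic sparse-recovery problem (illustrative only; assumes pyCSalgos is on the Python path, and the problem sizes are arbitrary):

import numpy as np
from pyCSalgos.RecomTST.RecommendedTST import RecommendedTST

np.random.seed(0)
n, p, k = 50, 100, 5
A = np.random.randn(n, p)                               # Gaussian measurement matrix
x0 = np.zeros(p)
x0[np.random.permutation(p)[:k]] = np.random.randn(k)   # k-sparse signal
y = np.dot(A, x0)                                       # noiseless measurements

xr = RecommendedTST(A, y, nsweep=300, tol=1e-5)
print(np.linalg.norm(xr - x0))                          # near zero on success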
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/RecomTST_test.py	Mon Oct 24 23:39:53 2011 +0000
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Oct 24 21:17:49 2011
+
+@author: Nic
+
+Test RecommendedTST algorithm
+"""
+
+import numpy as np
+import numpy.linalg
+import scipy.io
+import unittest
+from pyCSalgos.RecomTST.RecommendedTST import RecommendedTST
+
+class RecomTSTresults(unittest.TestCase):
+    def testResults(self):
+        mdict = scipy.io.loadmat('RecomTSTtestdata.mat')
+
+        # A   = system matrix
+        # Y   = matrix with measurements (on columns)
+        # X0  = matrix with initial solutions (on columns)
+        # Tol = vector with tolerances
+        # Xr  = matrix with correct solutions (on columns)
+        for A,Y,X0,Tol,Xr in zip(mdict['cellA'].squeeze(), mdict['cellY'].squeeze(), mdict['cellX0'].squeeze(), mdict['cellTol'].squeeze(), mdict['cellXr'].squeeze()):
+            for i in np.arange(Y.shape[1]):
+                xr = RecommendedTST(A, Y[:,i], nsweep=300, tol=Tol.squeeze()[i], xinitial=X0[:,i])
+
+                # check if the found solution is the same as the correct solution
+                diff = numpy.linalg.norm(xr - Xr[:,i])
+                self.assertTrue(diff < 1e-12)
+                #err1 = numpy.linalg.norm(Y[:,i] - np.dot(A,xr))
+                #err2 = numpy.linalg.norm(Y[:,i] - np.dot(A,Xr[:,i]))
+                #norm1 = numpy.linalg.norm(xr,1)
+                #norm2 = numpy.linalg.norm(Xr[:,i],1)
+                #print 'diff = ',diff
+                #print 'err1 = ',err1
+                #print 'err2 = ',err2
+                #print 'norm1 = ',norm1
+                #print 'norm2 = ',norm2
+                #
+                # It seems Matlab's linsolve and scipy's solver are slightly
+                # different, therefore use a more robust condition:
+                # OK if the solutions are close enough (diff < 1e-6)
+                # or
+                # (
+                #   they fulfill the constraint closely enough (differr < 1e-6)
+                #   and
+                #   the Python solution has an l1 norm no more than 1e-6 larger
+                #   than the reference solution
+                #   (i.e. either norm1 < norm2, or norm1 > norm2 by no more than 1e-6)
+                # )
+                # ERROR otherwise
+                #differr = abs(err1 - err2)
+                #diffnorm = norm1 - norm2  # intentionally no abs(), since norm1 < norm2 is good
+                #if diff < 1e-6 or (differr < 1e-6 and diffnorm < 1e-6):
+                #    isok = True
+                #else:
+                #    isok = False
+                #self.assertTrue(isok)
+
+if __name__ == "__main__":
+    unittest.main(verbosity=2)
+    #suite = unittest.TestLoader().loadTestsFromTestCase(RecomTSTresults)
+    #unittest.TextTestRunner(verbosity=2).run(suite)
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/RecomTSTgentest.m	Mon Oct 24 23:39:53 2011 +0000
@@ -0,0 +1,36 @@
+% Run RecommendedTST and save parameters and solutions as reference test
+% data, to check whether other implementations are correct
+
+numA = 10;
+numY = 100;
+
+sizesA{1} = [50 100];
+sizesA{2} = [20 25];
+sizesA{3} = [10 120];
+sizesA{4} = [15 100];
+sizesA{5} = [70 100];
+sizesA{6} = [80 100];
+sizesA{7} = [90 100];
+sizesA{8} = [99 100];
+sizesA{9} = [100 100];
+sizesA{10} = [250 400];
+
+for i = 1:numA
+    sz = sizesA{i};
+    cellA{i} = randn(sz);
+    cellY{i} = randn(sz(1), numY);
+    for j = 1:numY
+        cellTol{i}(j) = rand / 5; % restrict to between 0 and 20% (relative tolerance)
+        %cellX0{i}(:,j) = cellA{i} \ cellY{i}(:,j);
+        cellX0{i}(:,j) = zeros(size(cellA{i},2),1);
+    end
+end
+%load BPtestdata
+
+for i = 1:numA
+    for j = 1:numY
+        cellXr{i}(:,j) = RecommendedTST(cellA{i}, cellY{i}(:,j), 300, cellTol{i}(j), cellX0{i}(:,j));
+    end
+end
+
+save RecomTSTtestdata
\ No newline at end of file
--- a/tests/l1qc_test.py	Sun Oct 23 20:02:34 2011 +0000
+++ b/tests/l1qc_test.py	Mon Oct 24 23:39:53 2011 +0000
@@ -20,7 +20,7 @@
 class BPresults(unittest.TestCase):
     def testResults(self):
-        mdict = scipy.io.loadmat('./data/BPtestdata.mat')
+        mdict = scipy.io.loadmat('BPtestdata.mat')
 
         # A = system matrix
         # Y = matrix with measurements (on columns)
@@ -62,9 +62,10 @@
                 else:
                     isok = False
 
-                if not isok:
-                    print "should raise"
-                    #self.assertTrue(isok)
+                #if not isok:
+                #    print "should raise"
+                #    #self.assertTrue(isok)
+                self.assertTrue(isok)
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
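Note that both test modules now load their .mat reference files through bare relative paths, so they need to be run with tests/ as the working directory; for example (a sketch, assuming Python 2.7+ for unittest discovery):

# Run from inside tests/ so that loadmat('RecomTSTtestdata.mat') and
# loadmat('BPtestdata.mat') resolve correctly.
import unittest
suite = unittest.TestLoader().discover('.', pattern='*_test.py')
unittest.TextTestRunner(verbosity=2).run(suite)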