pyCSalgos/TST/RecommendedTST.py @ 60:a7082bb22644
Modified structure: this repository should keep only the bare solvers; anything related to Analysis should be moved to the ABS repository.
Renamed RecomTST to TST, renamed many functions, removed Analysis.py and algos.py.
author    Nic Cleju <nikcleju@gmail.com>
date      Fri, 09 Mar 2012 16:25:31 +0200
parents   pyCSalgos/RecomTST/RecommendedTST.py@5f46ff51c7ff
children  (none)

import numpy as np
import math

#function beta = RecommendedTST(X,Y, nsweep,tol,xinitial,ro)
def RecommendedTST(X, Y, nsweep=300, tol=0.00001, xinitial=None, ro=None):

    # function beta = RecommendedTST(X, Y, nsweep, tol, xinitial, ro)
    # This function takes the measurement matrix, the measurements and the
    # number of iterations, and applies the TST algorithm with optimally tuned
    # parameters to the problem. For more information you may refer to the paper
    # "Optimally tuned iterative reconstruction algorithms for compressed
    # sensing," by Arian Maleki and David L. Donoho.
    # X        : measurement matrix. We assume that all the columns have almost
    #            equal $\ell_2$ norms; the tuning has been done on matrices
    #            with unit column norm.
    # Y        : output (measurement) vector
    # nsweep   : number of iterations. The default value is 300.
    # tol      : if the relative prediction error ||Y-Ax||_2 / ||Y||_2 drops
    #            below tol, the algorithm stops; otherwise it runs for nsweep
    #            iterations. The default value is 0.00001.
    # xinitial : optional initialization of the algorithm. All the results
    #            reported in the paper use initialization at the zero vector,
    #            which is the default here (xinitial=None).
    # ro       : optional sparsity level. If not given, the algorithm uses the
    #            default optimally tuned value.
    #
    # Outputs:
    #   beta : the estimated coefficients.
    #
    # References:
    # For more information about this algorithm, and for papers about related
    # algorithms such as CoSaMP and SP, please refer to the paper mentioned
    # above and its references.
    #
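    # Example call (illustrative only, not from the original code; assumes a
    # measurement matrix A with roughly unit-norm columns and a measurement
    # vector y):
    #   xhat = RecommendedTST(A, y, nsweep=300, tol=1e-5)
    #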
    #---------------------
    # Original Matlab code:
    #
    #colnorm=mean((sum(X.^2)).^(.5));
    #X=X./colnorm;
    #Y=Y./colnorm;
    #[n,p]=size(X);
    #delta=n/p;
    #if nargin<3
    # nsweep=300;
    #end
    #if nargin<4
    # tol=0.00001;
    #end
    #if nargin<5 | xinitial==0
    # xinitial = zeros(p,1);
    #end
    #if nargin<6 | ro==0
    # ro=0.044417*delta^2+ 0.34142*delta+0.14844;
    #end
    #
    #
    #k1=floor(ro*n);
    #k2=floor(ro*n);
    #
    #
    ##initialization
    #x1=xinitial;
    #I=[];
    #
    #for sweep=1:nsweep
    # r=Y-X*x1;
    # c=X'*r;
    # [csort, i_csort]=sort(abs(c));
    # I=union(I,i_csort(end-k2+1:end));
    # xt = X(:,I) \ Y;
    # [xtsort, i_xtsort]=sort(abs(xt));
    #
    # J=I(i_xtsort(end-k1+1:end));
    # x1=zeros(p,1);
    # x1(J)=xt(i_xtsort(end-k1+1:end));
    # I=J;
    # if norm(Y-X*x1)/norm(Y)<tol
    # break
    # end
    #
    #end
    #
    #beta=x1;
    #
    # End of original Matlab code
    #----------------------------

    #colnorm=mean((sum(X.^2)).^(.5));
    colnorm = np.mean(np.sqrt((X**2).sum(0)))
    X = X / colnorm
    Y = Y / colnorm
    n, p = X.shape
    delta = float(n) / p
    # if nargin<3
    # nsweep=300;
    # end
    # if nargin<4
    # tol=0.00001;
    # end
    # if nargin<5 | xinitial==0
    # xinitial = zeros(p,1);
    # end
    # if nargin<6 | ro==0
    # ro=0.044417*delta^2+ 0.34142*delta+0.14844;
    # end
    if xinitial is None:
        xinitial = np.zeros(p)
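    # Default ro: the optimally tuned sparsity level as a function of the
    # undersampling ratio delta = n/p, taken from the Maleki & Donoho paper
    # cited above.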
    if ro is None:
        ro = 0.044417*delta**2 + 0.34142*delta + 0.14844

    # k1 and k2 must be integers because they are used as array indices below
    k1 = int(math.floor(ro*n))
    k2 = int(math.floor(ro*n))

    # initialization
    x1 = xinitial.copy()
    I = []

    for sweep in range(nsweep):
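        # Compute the residual of the current estimate and correlate it with
        # the columns of X.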
        r = Y - np.dot(X, x1)
        c = np.dot(X.T, r)
        #[csort, i_csort] = np.sort(np.abs(c))
        i_csort = np.argsort(np.abs(c))
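        # First thresholding step: add the k2 indices with the largest
        # correlation magnitude to the current support estimate I.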
        #I = numpy.union1d(I , i_csort(end-k2+1:end))
        I = np.union1d(I, i_csort[-k2:])
        #xt = X[:,I] \ Y
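        # Least-squares fit of Y on the selected columns (the Matlab backslash).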
        # Make sure X[:,np.int_(I)] is a 2-dimensional matrix even if I has a
        # single value (and therefore would yield a 1-D column)
        if I.size == 1:
            a = np.reshape(X[:, np.int_(I)], (X.shape[0], 1))
        else:
            a = X[:, np.int_(I)]
        xt = np.linalg.lstsq(a, Y)[0]
        #[xtsort, i_xtsort] = np.sort(np.abs(xt))
        i_xtsort = np.argsort(np.abs(xt))

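        # Second thresholding step: keep only the k1 largest-magnitude entries
        # of the least-squares solution; their indices form the new support J.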
        J = I[i_xtsort[-k1:]]
        x1 = np.zeros(p)
        x1[np.int_(J)] = xt[i_xtsort[-k1:]]
        I = J.copy()
        if np.linalg.norm(Y - np.dot(X, x1)) / np.linalg.norm(Y) < tol:
            break
        #end

    #end

    return x1.copy()
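

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original file):
# recover a synthetic sparse vector from random Gaussian measurements and
# report the relative reconstruction error. The problem sizes, sparsity level
# and random seed below are arbitrary choices made for this demo.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    np.random.seed(0)
    n, p, k = 80, 200, 8
    A = np.random.randn(n, p)
    A = A / np.sqrt((A**2).sum(0))      # unit-norm columns, as the tuning assumes
    x0 = np.zeros(p)
    x0[np.random.permutation(p)[:k]] = np.random.randn(k)
    y = np.dot(A, x0)
    xhat = RecommendedTST(A, y)
    print("Relative reconstruction error:",
          np.linalg.norm(xhat - x0) / np.linalg.norm(x0))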