import numpy as np
import math

#function beta = RecommendedTST(X,Y, nsweep,tol,xinitial,ro)
def RecommendedTST(X, Y, nsweep=300, tol=0.00001, xinitial=None, ro=None):

    # function beta = RecommendedTST(X, Y, nsweep, tol, xinitial, ro)
    # This function takes the measurement matrix, the measurements and the
    # number of iterations, and applies the TST algorithm with optimally
    # tuned parameters to the problem. For more information refer to the
    # paper "Optimally tuned iterative reconstruction algorithms for
    # compressed sensing," by Arian Maleki and David L. Donoho.
    # X        : measurement matrix. All columns are assumed to have almost
    #            equal $\ell_2$ norms; the tuning was done on matrices with
    #            unit column norm.
    # Y        : measurement (output) vector.
    # nsweep   : number of iterations. The default value is 300.
    # tol      : if the relative prediction error ||Y - X*x||_2 / ||Y||_2
    #            falls below tol, the algorithm stops; otherwise it runs
    #            for nsweep iterations. The default value is 0.00001.
    # xinitial : optional initial estimate. All the results reported in the
    #            paper use initialization at the zero vector, which is the
    #            default; pass None (or omit the argument) to use it.
    # ro       : optional sparsity level. If not given, the algorithm uses
    #            the default optimally tuned value; pass None (or omit the
    #            argument) to use it.
    #
    # Outputs:
    # beta     : the estimated coefficients.
    #
    # References:
    # For more information about this algorithm, and for the papers on
    # related algorithms such as CoSaMP and SP, please refer to the paper
    # mentioned above and the references therein.
    #
    # A minimal usage sketch appears at the end of this file.
    #---------------------
    # Original Matlab code:
    #
    #colnorm=mean((sum(X.^2)).^(.5));
    #X=X./colnorm;
    #Y=Y./colnorm;
    #[n,p]=size(X);
    #delta=n/p;
    #if nargin<3
    #    nsweep=300;
    #end
    #if nargin<4
    #    tol=0.00001;
    #end
    #if nargin<5 | xinitial==0
    #    xinitial = zeros(p,1);
    #end
    #if nargin<6 | ro==0
    #    ro=0.044417*delta^2+ 0.34142*delta+0.14844;
    #end
    #
    #
    #k1=floor(ro*n);
    #k2=floor(ro*n);
    #
    #
    ##initialization
    #x1=xinitial;
    #I=[];
    #
    #for sweep=1:nsweep
    #    r=Y-X*x1;
    #    c=X'*r;
    #    [csort, i_csort]=sort(abs(c));
    #    I=union(I,i_csort(end-k2+1:end));
    #    xt = X(:,I) \ Y;
    #    [xtsort, i_xtsort]=sort(abs(xt));
    #
    #    J=I(i_xtsort(end-k1+1:end));
    #    x1=zeros(p,1);
    #    x1(J)=xt(i_xtsort(end-k1+1:end));
    #    I=J;
    #    if norm(Y-X*x1)/norm(Y)<tol
    #        break
    #    end
    #
    #end
    #
    #beta=x1;
    #
    # End of original Matlab code
    #----------------------------

    # Normalize so that the columns of X have (on average) unit norm;
    # rescaling Y by the same factor leaves the solution unchanged.
    #colnorm=mean((sum(X.^2)).^(.5));
    colnorm = np.mean(np.sqrt((X**2).sum(0)))
    X = X / colnorm
    Y = Y / colnorm
    n, p = X.shape
    delta = float(n) / p
    # if nargin<3
    #     nsweep=300;
    # end
    # if nargin<4
    #     tol=0.00001;
    # end
    # if nargin<5 | xinitial==0
    #     xinitial = zeros(p,1);
    # end
    # if nargin<6 | ro==0
    #     ro=0.044417*delta^2+ 0.34142*delta+0.14844;
    # end
    if xinitial is None:
        xinitial = np.zeros(p)
    if ro is None:
        ro = 0.044417*delta**2 + 0.34142*delta + 0.14844
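    # For illustration only (values assumed, not from the original code):
    # with delta = n/p = 0.5 the tuned polynomial gives
    # ro = 0.044417*0.25 + 0.34142*0.5 + 0.14844 ~= 0.33,
    # so k1 = k2 = floor(0.33*n) below.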

    k1 = int(math.floor(ro*n))
    k2 = int(math.floor(ro*n))

    # initialization
    x1 = xinitial.copy()
    I = []

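    # Two-stage thresholding (TST) iteration, mirroring the Matlab code above:
    #   1) correlate the current residual with the columns of X and add the
    #      k2 largest correlations (in magnitude) to the support estimate I;
    #   2) solve a least-squares problem restricted to the columns in I and
    #      keep only the k1 largest coefficients, which define the new
    #      support and the new estimate x1.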
    for sweep in np.arange(nsweep):
        r = Y - np.dot(X, x1)      # residual
        c = np.dot(X.T, r)         # correlations with the columns of X
        #[csort, i_csort] = np.sort(np.abs(c))
        i_csort = np.argsort(np.abs(c))
        #I = numpy.union1d(I, i_csort(end-k2+1:end))
        I = np.union1d(I, i_csort[-k2:])
        #xt = X[:,I] \ Y
        # Make sure X[:,np.int_(I)] is a 2-dimensional matrix even if I
        # holds a single index (which would otherwise yield a 1-d vector)
        if I.size == 1:
            a = np.reshape(X[:, np.int_(I)], (X.shape[0], 1))
        else:
            a = X[:, np.int_(I)]
        xt = np.linalg.lstsq(a, Y, rcond=None)[0]
        #[xtsort, i_xtsort] = np.sort(np.abs(xt))
        i_xtsort = np.argsort(np.abs(xt))

        J = I[i_xtsort[-k1:]]
        x1 = np.zeros(p)
        x1[np.int_(J)] = xt[i_xtsort[-k1:]]
        I = J.copy()
        if np.linalg.norm(Y - np.dot(X, x1)) / np.linalg.norm(Y) < tol:
            break
        #end

    #end

    return x1.copy()
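

# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original translation): recover a sparse
# vector from random Gaussian measurements and report the relative
# reconstruction error. The problem sizes and names below (A, y, x0, xhat) are
# illustrative assumptions only.
if __name__ == "__main__":
    np.random.seed(0)
    n, p, k = 50, 200, 5                        # measurements, dimension, sparsity
    A = np.random.randn(n, p)
    A = A / np.sqrt((A**2).sum(0))              # unit-norm columns, as assumed above
    x0 = np.zeros(p)
    x0[np.random.permutation(p)[:k]] = np.random.randn(k)
    y = np.dot(A, x0)
    xhat = RecommendedTST(A, y)
    print("relative error:", np.linalg.norm(xhat - x0) / np.linalg.norm(x0))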