Mercurial > hg > pycsalgos
comparison scripts/ABSapproxSingle.py @ 32:e1da5140c9a5
Count and display finished jobs in run_once_tuple()
Disabled "dictionary not normalized" message in OMP
Fixed bug that displayed same values for all algorithms
Made TST default iterations nsweep = 300
author | nikcleju |
---|---|
date | Fri, 11 Nov 2011 15:35:55 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
31:829bf04c92af | 32:e1da5140c9a5 |
---|---|
1 # -*- coding: utf-8 -*- | |
2 """ | |
3 Created on Sat Nov 05 18:08:40 2011 | |
4 | |
5 @author: Nic | |
6 """ | |
7 | |
8 import numpy as np | |
9 import scipy.io | |
10 import math | |
# Plotting is optional: if matplotlib is not installed, the experiment still
# runs and only the final figures are skipped.
doplot = True
try:
    import matplotlib.pyplot as plt
except ImportError:  # was a bare except:, which also hid e.g. KeyboardInterrupt
    doplot = False
if doplot:
    import matplotlib.cm as cm
18 import pyCSalgos | |
19 import pyCSalgos.GAP.GAP | |
20 import pyCSalgos.SL0.SL0_approx | |
21 | |
22 # Define functions that prepare arguments for each algorithm call | |
def run_gap(y,M,Omega,epsilon):
    """Run the GAP algorithm on one measurement vector.

    Builds the fixed GAP parameter set, starts from the zero vector, and
    returns only the reconstructed signal (first element of GAP's output).
    """
    params = dict(
        num_iteration=1000,
        greedy_level=0.9,
        stopping_coefficient_size=1e-4,
        l2solver='pseudoinverse',
        noise_level=epsilon,
    )
    initial = np.zeros(Omega.shape[1])
    result = pyCSalgos.GAP.GAP.GAP(y, M, M.T, Omega, Omega.T, params, initial)
    return result[0]
30 | |
def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
    """Run approximate SL0 on the lambda-augmented (ABS) system.

    Stacks the measurement operator M*D on top of the lambda-weighted last
    N-n rows of Vt, pads y with zeros to match, and solves with SL0_approx.
    """
    N, n = Omega.shape
    # Augmented dictionary: data-fit rows on top, lambda-weighted rows below.
    top = np.dot(M, D)
    bottom = Vt[-(N - n):, :]
    aggD = np.vstack((top, lbd * bottom))
    aggy = np.hstack((y, np.zeros(N - n)))

    # SL0 solver settings (unchanged from the original experiment)
    sigma_floor = 0.001
    sigma_shrink = 0.5
    step_mu = 2
    inner_loops = 10
    return pyCSalgos.SL0.SL0_approx.SL0_approx(
        aggD, aggy, epsilon, sigma_floor, sigma_shrink, step_mu, inner_loops)
46 | |
def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
    # NOTE(review): despite the name, this function is currently identical to
    # run_sl0 and calls SL0_approx, not a Basis Pursuit solver — presumably a
    # copy-paste placeholder; confirm intent before using it as "BP".

    N,n = Omega.shape
    #D = np.linalg.pinv(Omega)
    #U,S,Vt = np.linalg.svd(D)
    # Augmented system: [M*D ; lbd * last (N-n) rows of Vt] and [y ; 0]
    aggDupper = np.dot(M,D)
    aggDlower = Vt[-(N-n):,:]
    aggD = np.concatenate((aggDupper, lbd * aggDlower))
    aggy = np.concatenate((y, np.zeros(N-n)))

    # SL0 solver settings
    sigmamin = 0.001
    sigma_decrease_factor = 0.5
    mu_0 = 2
    L = 10
    return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
62 | |
63 | |
# Define tuples (algorithm function, name)
gap = (run_gap, 'GAP')
sl0 = (run_sl0, 'SL0_approx')

# Define which algorithms to run
# 1. Algorithms not depending on lambda
algosN = gap, # one-element tuple (note the trailing comma)
# 2. Algorithms depending on lambda (our ABS approach)
algosL = sl0, # one-element tuple (note the trailing comma)
73 | |
def mainrun():
    """Run the full experiment: sweep (delta, rho), run every algorithm via
    runonce(), accumulate 1 - mean relative error per cell, save the results
    to 'ABSapprox.mat', and (if matplotlib is available) display them."""

    nalgosN = len(algosN)
    nalgosL = len(algosL)

    #Set up experiment parameters
    d = 50.0;       # ambient signal dimension
    sigma = 2.0     # operator oversampling: Omega gets sigma*d rows (see genData)
    #deltas = np.arange(0.05,1.,0.05)
    #rhos = np.arange(0.05,1.,0.05)
    deltas = np.array([0.05, 0.45, 0.95])
    rhos = np.array([0.05, 0.45, 0.95])
    #deltas = np.array([0.05])
    #rhos = np.array([0.05])
    #delta = 0.8;
    #rho = 0.15;
    numvects = 100; # Number of vectors to generate
    SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
    # Values for lambda
    #lambdas = [0 10.^linspace(-5, 4, 10)];
    #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
    lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000])

    # meanmatrix[name] stores 1 - mean relative error per (rho, delta) cell;
    # lambda-dependent algorithms get one such matrix per lambda value.
    meanmatrix = dict()
    for i,algo in zip(np.arange(nalgosN),algosN):
        meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size))
    for i,algo in zip(np.arange(nalgosL),algosL):
        meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size))

    for idelta,delta in zip(np.arange(deltas.size),deltas):
        for irho,rho in zip(np.arange(rhos.size),rhos):

            # Generate data and operator
            Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)

            # Run algorithms
            print "***** delta = ",delta," rho = ",rho
            mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)

            # Store 1 - relative error; negative or NaN cells are clamped to 0
            # so the matrices stay in [0, 1] for display.
            for algotuple in algosN:
                meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
                if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
                    meanmatrix[algotuple[1]][irho,idelta] = 0
            for algotuple in algosL:
                for ilbd in np.arange(lambdas.size):
                    meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
                    if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
                        meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0

#    # Prepare matrices to show
#    showmats = dict()
#    for i,algo in zip(np.arange(nalgosN),algosN):
#      showmats[algo[1]] = np.zeros(rhos.size, deltas.size)
#    for i,algo in zip(np.arange(nalgosL),algosL):
#      showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size)

    # Save all inputs and results to a Matlab-readable .mat file
    tosave = dict()
    tosave['meanmatrix'] = meanmatrix
    tosave['d'] = d
    tosave['sigma'] = sigma
    tosave['deltas'] = deltas
    tosave['rhos'] = rhos
    tosave['numvects'] = numvects
    tosave['SNRdb'] = SNRdb
    tosave['lambdas'] = lambdas
    try:
        scipy.io.savemat('ABSapprox.mat',tosave)
    except TypeError:
        print "Oops, Type Error"
        raise
    # Show one phase-transition image per algorithm (and per lambda for ABS)
    if doplot:
        for algotuple in algosN:
            plt.figure()
            plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower')
        for algotuple in algosL:
            for ilbd in np.arange(lambdas.size):
                plt.figure()
                plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower')
        plt.show()
    print "Finished."
156 | |
def genData(d,sigma,delta,rho,numvects,SNRdb):
    """Generate a random analysis operator and noisy measurements.

    Returns (Omega, x0, y, M, realnoise) as produced by the pyCSalgos GAP
    data generators, after reweighting Omega's singular values.
    """
    # Translate the experiment parameters into problem sizes
    noiselevel = 1.0 / (10.0 ** (SNRdb / 10.0))
    p = round(sigma * d)       # rows of the analysis operator
    m = round(delta * d)       # number of measurements
    l = round(d - rho * m)     # third size parameter for the data generator

    # Generate Omega, then rescale its singular values by 1..len(S)
    Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p)
    U, S, Vt = np.linalg.svd(Omega)
    boosted = S * (1 + np.arange(S.size))  # Make D coherent, not Omega!
    pad = np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))
    Snew = np.vstack((np.diag(boosted), pad))
    Omega = np.dot(U, np.dot(Snew, Vt))

    # Draw the signals and their noisy measurements for this operator
    x0, y, M, Lambda, realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(
        Omega, d, p, m, l, noiselevel, numvects, 'l0')

    return Omega, x0, y, M, realnoise
177 | |
def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
    """Run every algorithm on one (delta, rho) problem instance.

    Parameters
    ----------
    algosN : iterable of (function, name) pairs; algorithms without lambda,
             called as f(y_col, M, Omega, epsilon) -> reconstructed x (size d)
    algosL : iterable of (function, name) pairs; ABS algorithms, called as
             f(y_col, M, Omega, D, U, S, Vt, epsilon, lbd) -> gamma,
             reconstructed as x = D.dot(gamma)
    Omega  : analysis operator; d = Omega.shape[1]
    y      : measurements, one column per test vector
    lambdas: 1D array of lambda values to sweep for algosL
    realnoise : actual noise per measurement column (sets epsilon)
    M      : measurement matrix
    x0     : ground-truth signals, one column per test vector

    Returns
    -------
    mrelerrN : dict name -> mean relative error (scalar)
    mrelerrL : dict name -> mean relative error per lambda (1D array)
    """
    d = Omega.shape[1]

    xrec = dict()
    err = dict()
    relerr = dict()

    # Prepare storage variables for algorithms non-Lambda
    for algo in algosN:
        xrec[algo[1]] = np.zeros((d, y.shape[1]))
        err[algo[1]] = np.zeros(y.shape[1])
        relerr[algo[1]] = np.zeros(y.shape[1])
    # Prepare storage variables for algorithms with Lambda
    for algo in algosL:
        xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1]))
        err[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
        relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))

    # Run algorithms non-Lambda
    for iy in np.arange(y.shape[1]):
        for algofunc, strname in algosN:
            epsilon = 1.1 * np.linalg.norm(realnoise[:, iy])
            xrec[strname][:, iy] = algofunc(y[:, iy], M, Omega, epsilon)
            err[strname][iy] = np.linalg.norm(x0[:, iy] - xrec[strname][:, iy])
            relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:, iy])
    for algotuple in algosN:
        # BUGFIX: index with this algorithm's own name. The original used
        # relerr[strname], reusing the inner loop variable, so every
        # algorithm displayed the last algorithm's error values.
        print('%s : avg relative error = %s' % (algotuple[1], np.mean(relerr[algotuple[1]])))

    # Run algorithms with Lambda.
    # D and its SVD depend only on Omega, so compute them once (the original
    # recomputed pinv+svd inside the per-vector loop).
    D = np.linalg.pinv(Omega)
    U, S, Vt = np.linalg.svd(D)
    for ilbd, lbd in zip(np.arange(lambdas.size), lambdas):
        for iy in np.arange(y.shape[1]):
            for algofunc, strname in algosL:
                epsilon = 1.1 * np.linalg.norm(realnoise[:, iy])
                gamma = algofunc(y[:, iy], M, Omega, D, U, S, Vt, epsilon, lbd)
                xrec[strname][ilbd, :, iy] = np.dot(D, gamma)
                err[strname][ilbd, iy] = np.linalg.norm(x0[:, iy] - xrec[strname][ilbd, :, iy])
                relerr[strname][ilbd, iy] = err[strname][ilbd, iy] / np.linalg.norm(x0[:, iy])
        print('Lambda = %s :' % lbd)
        for algotuple in algosL:
            # BUGFIX: same strname -> algotuple[1] fix as above.
            print('  %s : avg relative error = %s' % (algotuple[1], np.mean(relerr[algotuple[1]][ilbd, :])))

    # Prepare results: mean over test vectors (and keep the lambda axis for algosL)
    mrelerrN = dict()
    for algotuple in algosN:
        mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
    mrelerrL = dict()
    for algotuple in algosL:
        mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]], 1)

    return mrelerrN, mrelerrL
234 | |
# Script main: run the whole experiment when executed directly.
if __name__ == "__main__":

    # Profiling hooks kept for convenience:
    #import cProfile
    #cProfile.run('mainrun()', 'profile')
    mainrun()