Mercurial > hg > pycsalgos
comparison scripts/ABSapprox.py @ 22:2dd78e37b23a
ABS approx script is working
Started working on parallel
author | nikcleju |
---|---|
date | Wed, 09 Nov 2011 00:11:14 +0000 |
parents | 45255b0a6dba |
children | c02eb33d2c54 |
comparison
equal
deleted
inserted
replaced
21:2805288d6656 | 22:2dd78e37b23a |
---|---|
4 | 4 |
5 @author: Nic | 5 @author: Nic |
6 """ | 6 """ |
7 | 7 |
8 import numpy as np | 8 import numpy as np |
9 import scipy.io | |
10 import math | |
11 import matplotlib.pyplot as plt | |
12 import matplotlib.cm as cm | |
9 import pyCSalgos | 13 import pyCSalgos |
10 import pyCSalgos.GAP.GAP | 14 import pyCSalgos.GAP.GAP |
11 import pyCSalgos.SL0.SL0_approx | 15 import pyCSalgos.SL0.SL0_approx |
12 | 16 |
# Define functions that run each algorithm on one measurement vector
def run_gap(y, M, Omega, epsilon):
    """Run GAP on a single measurement vector y; return the reconstruction xhat.

    epsilon is the noise level handed to GAP's stopping criterion.
    """
    params = dict(
        num_iteration=1000,
        greedy_level=0.9,
        stopping_coefficient_size=1e-4,
        l2solver='pseudoinverse',
        noise_level=epsilon,
    )
    xinit = np.zeros(Omega.shape[1])
    # GAP returns (xhat, ...); only the reconstruction is needed here
    return pyCSalgos.GAP.GAP.GAP(y, M, M.T, Omega, Omega.T, params, xinit)[0]
def run_sl0(y, M, Omega, D, U, S, Vt, epsilon, lbd):
    """Run SL0_approx on the aggregate (ABS) system for one vector y.

    D is pinv(Omega) and U, S, Vt its SVD, precomputed by the caller so they
    are not recomputed per call. lbd weights the null-space constraint rows.
    Returns the sparse coefficient vector gamma (caller maps back via D*gamma).
    """
    N, n = Omega.shape

    # Build the augmented dictionary [M*D ; lbd * (last N-n rows of Vt)]
    # and the augmented measurements [y ; 0].
    upper = np.dot(M, D)
    lower = Vt[-(N - n):, :]
    aggD = np.concatenate((upper, lbd * lower))
    aggy = np.concatenate((y, np.zeros(N - n)))

    # SL0 solver parameters
    sigmamin = 0.001
    sigma_decrease_factor = 0.5
    mu_0 = 2
    L = 10
    return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD, aggy, epsilon, sigmamin,
                                               sigma_decrease_factor, mu_0, L)
36 | |
37 def post_multiply_with_D(D,gamma): | |
38 return np.dot(D,gamma) | |
39 def post_do_nothing(D,gamma): | |
40 return gamma | |
41 | 41 |
# Tuples of (run function, display name) for each algorithm
gap = (run_gap, 'GAP')
sl0 = (run_sl0, 'SL0_approx')

# Define which algorithms to run
# 1. Algorithms not depending on lambda
algosN = (gap,)
# 2. Algorithms depending on lambda (our ABS approach)
algosL = (sl0,)
51 | |
48 def mainrun(): | 52 def mainrun(): |
49 | 53 |
50 # Define which algorithms to run | 54 nalgosN = len(algosN) |
51 algos = (gap, sl0) | 55 nalgosL = len(algosL) |
52 numalgos = len(algos) | 56 |
53 | 57 #Set up experiment parameters |
54 # Set up experiment parameters | 58 d = 50; |
55 sigma = 2.0; | 59 sigma = 2.0 |
56 delta = 0.8; | 60 #deltas = np.arange(0.05,0.95,0.05) |
57 rho = 0.15; | 61 #rhos = np.arange(0.05,0.95,0.05) |
62 deltas = np.array([0.05,0.95]) | |
63 rhos = np.array([0.05,0.95]) | |
64 #deltas = np.array([0.05]) | |
65 #rhos = np.array([0.05]) | |
66 #delta = 0.8; | |
67 #rho = 0.15; | |
58 numvects = 10; # Number of vectors to generate | 68 numvects = 10; # Number of vectors to generate |
59 SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy | 69 SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy |
70 # Values for lambda | |
71 #lambdas = [0 10.^linspace(-5, 4, 10)]; | |
72 lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10))) | |
73 | |
74 meanmatrix = dict() | |
75 for i,algo in zip(np.arange(nalgosN),algosN): | |
76 meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) | |
77 for i,algo in zip(np.arange(nalgosL),algosL): | |
78 meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) | |
79 | |
80 for idelta,delta in zip(np.arange(deltas.size),deltas): | |
81 for irho,rho in zip(np.arange(rhos.size),rhos): | |
82 | |
83 # Generate data and operator | |
84 Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) | |
85 | |
86 # Run algorithms | |
87 mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) | |
88 | |
89 for algotuple in algosN: | |
90 meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] | |
91 if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): | |
92 meanmatrix[algotuple[1]][irho,idelta] = 0 | |
93 for algotuple in algosL: | |
94 for ilbd in np.arange(lambdas.size): | |
95 meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] | |
96 if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): | |
97 meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 | |
98 | |
99 # # Prepare matrices to show | |
100 # showmats = dict() | |
101 # for i,algo in zip(np.arange(nalgosN),algosN): | |
102 # showmats[algo[1]] = np.zeros(rhos.size, deltas.size) | |
103 # for i,algo in zip(np.arange(nalgosL),algosL): | |
104 # showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size) | |
105 | |
106 # Save | |
107 tosave = dict() | |
108 tosave['meanmatrix'] = meanmatrix | |
109 tosave['d'] = d | |
110 tosave['sigma'] = sigma | |
111 tosave['deltas'] = deltas | |
112 tosave['rhos'] = rhos | |
113 tosave['numvects'] = numvects | |
114 tosave['SNRdb'] = SNRdb | |
115 tosave['lambdas'] = lambdas | |
116 try: | |
117 scipy.io.savemat('ABSapprox.mat',tosave) | |
118 except TypeError: | |
119 print "Oops, Type Error" | |
120 raise | |
121 # Show | |
122 for algotuple in algosN: | |
123 plt.figure() | |
124 plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest') | |
125 for algotuple in algosL: | |
126 for ilbd in np.arange(lambdas.size): | |
127 plt.figure() | |
128 plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest') | |
129 plt.show() | |
130 print "Finished." | |
131 | |
def genData(d, sigma, delta, rho, numvects, SNRdb):
    """Generate the analysis operator Omega and noisy data for one (delta, rho) point.

    Returns (Omega, x0, y, M, realnoise) as produced by the pyCSalgos
    data generator.
    """
    # Process parameters
    noiselevel = 1.0 / (10.0**(SNRdb / 10.0))
    p = round(sigma * d)
    m = round(delta * d)
    l = round(d - rho * m)

    # Generate Omega based on the parameters
    Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p)
    # Make D coherent, not Omega: rescale Omega's singular values so its
    # pseudo-inverse dictionary becomes more coherent
    U, S, Vt = np.linalg.svd(Omega)
    Sdnew = S * (1 + np.arange(S.size))
    Snew = np.vstack((np.diag(Sdnew),
                      np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
    Omega = np.dot(U, np.dot(Snew, Vt))

    # Generate data
    x0, y, M, Lambda, realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d, p, m, l, noiselevel, numvects, 'l0')

    return Omega, x0, y, M, realnoise
80 #lambdas = [0 10.^linspace(-5, 4, 10)]; | 152 |
81 lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10))) | 153 def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): |
154 | |
155 d = Omega.shape[1] | |
156 | |
157 nalgosN = len(algosN) | |
158 nalgosL = len(algosL) | |
82 | 159 |
83 xrec = dict() | 160 xrec = dict() |
84 err = dict() | 161 err = dict() |
85 relerr = dict() | 162 relerr = dict() |
86 for i,algo in zip(np.arange(numalgos),algos): | 163 |
87 xrec[algo[3]] = np.zeros((lambdas.size, d, y.shape[1])) | 164 # Prepare storage variables for algorithms non-Lambda |
88 err[algo[3]] = np.zeros((lambdas.size, y.shape[1])) | 165 for i,algo in zip(np.arange(nalgosN),algosN): |
89 relerr[algo[3]] = np.zeros((lambdas.size, y.shape[1])) | 166 xrec[algo[1]] = np.zeros((d, y.shape[1])) |
90 | 167 err[algo[1]] = np.zeros(y.shape[1]) |
168 relerr[algo[1]] = np.zeros(y.shape[1]) | |
169 # Prepare storage variables for algorithms with Lambda | |
170 for i,algo in zip(np.arange(nalgosL),algosL): | |
171 xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) | |
172 err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) | |
173 relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) | |
174 | |
175 # Run algorithms non-Lambda | |
176 for iy in np.arange(y.shape[1]): | |
177 for algofunc,strname in algosN: | |
178 epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) | |
179 xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) | |
180 err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) | |
181 relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) | |
182 for algotuple in algosN: | |
183 print algotuple[1],' : avg relative error = ',np.mean(relerr[strname]) | |
184 | |
185 # Run algorithms with Lambda | |
91 for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): | 186 for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): |
92 for iy in np.arange(y.shape[1]): | 187 for iy in np.arange(y.shape[1]): |
93 for algosetupfunc,algofunc,algopostfunc,strname in algos: | 188 D = np.linalg.pinv(Omega) |
189 U,S,Vt = np.linalg.svd(D) | |
190 for algofunc,strname in algosL: | |
94 epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) | 191 epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) |
95 | 192 gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) |
96 inparams = algosetupfunc(y[:,iy],M,Omega,epsilon,lbd) | 193 xrec[strname][ilbd,:,iy] = np.dot(D,gamma) |
97 xrec[strname][ilbd,:,iy] = algopostfunc(algofunc(*inparams)[0]) | |
98 | |
99 err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) | 194 err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) |
100 relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) | 195 relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) |
101 | |
102 print 'Lambda = ',lbd,' :' | 196 print 'Lambda = ',lbd,' :' |
103 for strname in relerr: | 197 for algotuple in algosL: |
104 print ' ',strname,' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) | 198 print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) |
105 | 199 |
106 | 200 # Prepare results |
107 | 201 mrelerrN = dict() |
202 for algotuple in algosN: | |
203 mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) | |
204 mrelerrL = dict() | |
205 for algotuple in algosL: | |
206 mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) | |
207 | |
208 return mrelerrN,mrelerrL | |
209 | |
# Entry point when executed as a script
if __name__ == "__main__":
    mainrun()