comparison scripts/ABSapprox.py @ 32:e1da5140c9a5

Count and display finished jobs in run_once_tuple()
Disabled "dictionary not normalized" message in OMP
Fixed bug that displayed same values for all algorithms
Made TST default iterations nsweep = 300
author nikcleju
date Fri, 11 Nov 2011 15:35:55 +0000
parents 829bf04c92af
children 116dcfacd1cc
--- a/scripts/ABSapprox.py  31:829bf04c92af
+++ b/scripts/ABSapprox.py  32:e1da5140c9a5
@@ -81,11 +81,13 @@
     aggDupper = np.dot(M,D)
     aggDlower = Vt[-(N-n):,:]
     aggD = np.concatenate((aggDupper, lbd * aggDlower))
     aggy = np.concatenate((y, np.zeros(N-n)))

-    return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep=3000, tol=epsilon / np.linalg.norm(aggy))
+    nsweep = 300
+    tol = epsilon / np.linalg.norm(aggy)
+    return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep=nsweep, tol=tol)

 #==========================
 # Define tuples (algorithm function, name)
 #==========================
 gap = (run_gap, 'GAP')
@@ -96,11 +98,24 @@

 # Define which algorithms to run
 # 1. Algorithms not depending on lambda
 algosN = gap, # tuple
 # 2. Algorithms depending on lambda (our ABS approach)
-algosL = sl0,bp,ompeps,tst # tuple
+#algosL = sl0,bp,ompeps,tst # tuple
+algosL = sl0,tst
+
+#==========================
+# Pool initializer function (multiprocessing)
+# Needed to pass the shared variable to the worker processes
+# The variables must be global in the module in order to be seen later in run_once_tuple()
+# see http://stackoverflow.com/questions/1675766/how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing
+#==========================
+def initProcess(share, njobs):
+    import sys
+    currmodule = sys.modules[__name__]
+    currmodule.proccount = share
+    currmodule.njobs = njobs

 #==========================
 # Interface functions
 #==========================
 def run_multiproc(ncpus=None):
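The comment block in the hunk above describes the usual work-around for sharing a multiprocessing.Value with Pool workers: pass it through the pool's initializer/initargs and stash it where the worker code can find it later. Below is a minimal, self-contained sketch of that pattern; the names (init_worker, do_job, counter) are illustrative and not part of ABSapprox.py.

import multiprocessing

def init_worker(shared_counter, total_jobs):
    # Runs once in each worker process; keep the arguments as module
    # globals so the job function can reach them (equivalent to the
    # sys.modules[__name__] assignments in initProcess above).
    global counter, njobs
    counter = shared_counter
    njobs = total_jobs

def do_job(x):
    result = x * x                 # stand-in for the real per-job work
    with counter.get_lock():       # the Value carries a lock; avoids lost updates
        counter.value += 1
        print "Finished job", counter.value, "of", njobs
    return result

if __name__ == '__main__':
    params = range(8)
    counter = multiprocessing.Value('I', 0)   # unsigned int, starts at 0
    pool = multiprocessing.Pool(4, initializer=init_worker,
                                initargs=(counter, len(params)))
    print pool.map(do_job, params)

Passing the shared Value through initargs (or letting workers inherit it) is what makes this work; handing a synchronized Value directly to pool.map as part of each job's arguments is rejected by multiprocessing, which is exactly the problem discussed in the Stack Overflow question linked above.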
@@ -117,14 +132,14 @@

 def standard_params():
     #Set up standard experiment parameters
     d = 50.0;
     sigma = 2.0
-    deltas = np.arange(0.05,1.,0.05)
-    rhos = np.arange(0.05,1.,0.05)
-    #deltas = np.array([0.05, 0.45, 0.95])
-    #rhos = np.array([0.05, 0.45, 0.95])
+    #deltas = np.arange(0.05,1.,0.05)
+    #rhos = np.arange(0.05,1.,0.05)
+    deltas = np.array([0.05, 0.45, 0.95])
+    rhos = np.array([0.05, 0.45, 0.95])
     #deltas = np.array([0.05])
     #rhos = np.array([0.05])
     #delta = 0.8;
     #rho = 0.15;
     numvects = 100; # Number of vectors to generate
@@ -156,11 +171,16 @@

     print "This is analysis recovery ABS approximation script by Nic"
     print "Running phase transition ( run_multi() )"

     if doparallel:
-        from multiprocessing import Pool
+        import multiprocessing
+        # Shared value holding the number of finished processes
+        # Add it as global of the module
+        import sys
+        currmodule = sys.modules[__name__]
+        currmodule.proccount = multiprocessing.Value('I', 0) # 'I' = unsigned int, see docs (multiprocessing, array)

     if dosaveplot or doshowplot:
         try:
             import matplotlib
             if doshowplot:
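The 'I' in multiprocessing.Value('I', 0) above is an array-module style typecode (unsigned int); multiprocessing.Value accepts either such a one-character typecode or a ctypes type. A quick standalone illustration, unrelated to the script itself:

from multiprocessing import Value

count_u = Value('I', 0)    # 'I' -> unsigned int
count_i = Value('i', -1)   # 'i' -> signed int
ratio = Value('d', 0.5)    # 'd' -> double
print count_u.value, count_i.value, ratio.value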
@@ -216,18 +236,21 @@
             Omega,x0,y,M,realnoise = generateData(d,sigma,delta,rho,numvects,SNRdb)

             #Save the parameters, and run after
             print " delta = ",delta," rho = ",rho
             jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))

+    if doparallel:
+        currmodule.njobs = deltas.size * rhos.size
     print "End of parameters"

     # Run
     jobresults = []
+
     if doparallel:
-        pool = Pool(4)
-        jobresults = pool.map(run_once_tuple,jobparams)
+        pool = multiprocessing.Pool(4,initializer=initProcess,initargs=(currmodule.proccount,currmodule.njobs))
+        jobresults = pool.map(run_once_tuple, jobparams)
     else:
         for jobparam in jobparams:
             jobresults.append(run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0))

     # Read results
@@ -243,33 +266,26 @@
             for algotuple in algosL:
                 for ilbd in np.arange(lambdas.size):
                     meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
                     if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
                         meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0

-    # # Prepare matrices to show
-    # showmats = dict()
-    # for i,algo in zip(np.arange(nalgosN),algosN):
-    #     showmats[algo[1]] = np.zeros(rhos.size, deltas.size)
-    # for i,algo in zip(np.arange(nalgosL),algosL):
-    #     showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size)
-
     # Save
     if dosavedata:
         tosave = dict()
         tosave['meanmatrix'] = meanmatrix
         tosave['d'] = d
         tosave['sigma'] = sigma
         tosave['deltas'] = deltas
         tosave['rhos'] = rhos
         tosave['numvects'] = numvects
         tosave['SNRdb'] = SNRdb
         tosave['lambdas'] = lambdas
         try:
             scipy.io.savemat(savedataname, tosave)
         except:
             print "Save error"
     # Show
     if doshowplot or dosaveplot:
         for algotuple in algosN:
             algoname = algotuple[1]
             plt.figure()
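Since the results are written with scipy.io.savemat, they can be reloaded later from Python (or opened in MATLAB/Octave). A possible read-back, assuming a hypothetical file name 'absapprox.mat' in place of whatever savedataname holds:

import scipy.io

data = scipy.io.loadmat('absapprox.mat')   # hypothetical file name
print data['deltas'], data['rhos'], data['lambdas']
# 'meanmatrix' was saved from a dict of per-algorithm arrays, so savemat
# stores it as a MATLAB struct; loadmat returns it as a structured array
# rather than a plain Python dict.
meanmatrix = data['meanmatrix']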
@@ -289,11 +305,18 @@
         plt.show()

 print "Finished."

 def run_once_tuple(t):
-    return run_once(*t)
+    results = run_once(*t)
+    import sys
+    currmodule = sys.modules[__name__]
+    currmodule.proccount.value = currmodule.proccount.value + 1
+    print "================================"
+    print "Finished job",currmodule.proccount.value,"of",currmodule.njobs
+    print "================================"
+    return results

 def run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):

     d = Omega.shape[1]

@@ -320,12 +343,12 @@
         for algofunc,strname in algosN:
             epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
             xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
             err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
             relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
-    for algotuple in algosN:
-        print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])
+    for algofunc,strname in algosN:
+        print strname,' : avg relative error = ',np.mean(relerr[strname])

     # Run algorithms with Lambda
     for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
         for iy in np.arange(y.shape[1]):
             D = np.linalg.pinv(Omega)
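The -/+ pair above (and the matching one in the next hunk) is the "same values for all algorithms" bug named in the commit message: the old reporting loop iterated over algotuple but indexed relerr with strname, which at that point was just the leftover variable from the preceding recovery loop, so every algorithm was reported with the last algorithm's mean error. A stripped-down illustration of the stale-variable problem, with made-up numbers:

relerr = {'GAP': 0.10, 'SL0': 0.25, 'TST': 0.40}

strname = 'TST'                  # leftover from an earlier loop
for name in relerr:              # buggy: lookup ignores the loop variable
    print name, relerr[strname]  # prints 0.4 for every algorithm

for name in relerr:              # fixed: index with the loop variable
    print name, relerr[name]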
@@ -335,12 +358,12 @@
                 gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
                 xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
                 err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
                 relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
         print 'Lambda = ',lbd,' :'
-        for algotuple in algosL:
-            print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
+        for algofunc,strname in algosL:
+            print ' ',strname,' : avg relative error = ',np.mean(relerr[strname][ilbd,:])

     # Prepare results
     mrelerrN = dict()
     for algotuple in algosN:
         mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])