test_approx.py @ 17:7fdf964f4edd
Added docstrings to files and functions
author:   Nic Cleju <nikcleju@gmail.com>
date:     Tue, 03 Apr 2012 16:27:18 +0300
parents:  a27cfe83fe12
children: 4a967f4f18a0
# -*- coding: utf-8 -*-
"""
Main script for approximate reconstruction tests.

Author: Nicolae Cleju
"""
__author__ = "Nicolae Cleju"
__license__ = "GPL"
__email__ = "nikcleju@gmail.com"

import numpy
import scipy.io
import math
import os
import time
import multiprocessing
import sys

# Try to do smart importing of matplotlib
try:
    import matplotlib
    if os.name == 'nt':
        print "Importing matplotlib with default (GUI) backend... "
    else:
        print "Importing matplotlib with \"Cairo\" backend... "
        matplotlib.use('Cairo')
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    import matplotlib.colors as mcolors
except:
    print "FAIL"
    print "Importing matplotlib.pyplot failed. No figures at all"
    print "Try selecting a different backend"

currmodule = sys.modules[__name__]
printLock = None  # Lock for printing in a thread-safe way

# Thread-safe variable to store the number of finished tasks
proccount = multiprocessing.Value('I', 0)  # 'I' = unsigned int, see docs (multiprocessing, array)

# Contains pre-defined simulation parameters
import stdparams_approx

# Analysis operator and data generation functions
import AnalysisGenerate

# For exceptions
import pyCSalgos.BP.l1qec
import pyCSalgos.BP.l1qc
import pyCSalgos.NESTA.NESTA


def initProcess(share, ntasks, printLock):
    """
    Pool initializer function (multiprocessing).
    Needed to pass the shared variables to the worker processes.
    The variables must be global in the module in order to be seen later in run_once_tuple(), see
    http://stackoverflow.com/questions/1675766/how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing
    """
    currmodule = sys.modules[__name__]
    currmodule.proccount = share
    currmodule.ntasks = ntasks
    currmodule.printLock = printLock


def generateTaskParams(globalparams):
    """
    Generate a list of task parameters (for parallel running)
    """
    taskparams = []

    SNRdb = globalparams['SNRdb']
    sigma = globalparams['sigma']
    d = globalparams['d']
    deltas = globalparams['deltas']
    rhos = globalparams['rhos']
    numvects = globalparams['numvects']
    algosN = globalparams['algosN']
    algosL = globalparams['algosL']
    lambdas = globalparams['lambdas']

    # Process parameters
    noiselevel = 1.0 / (10.0**(SNRdb/10.0))

    for delta in deltas:
        for rho in rhos:
            p = round(sigma*d)
            m = round(delta*d)
            l = round(d - rho*m)

            # Generate Omega and data based on parameters
            Omega = AnalysisGenerate.Generate_Analysis_Operator(d, p)

            # Optionally make Omega more coherent
            #U,S,Vt = numpy.linalg.svd(Omega)
            #Sdnew = S * (1+numpy.arange(S.size))  # Make D coherent, not Omega!
            #Snew = numpy.vstack((numpy.diag(Sdnew), numpy.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
            #Omega = numpy.dot(U, numpy.dot(Snew, Vt))

            # Generate data
            x0,y,M,Lambda,realnoise = AnalysisGenerate.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0')

            # Append task params
            taskparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))

    return taskparams
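
# Illustrative sketch (not part of the original script): the Pool initializer
# pattern used by initProcess() above, reduced to a minimal standalone example.
# The _demo_* names are hypothetical. A multiprocessing.Value cannot simply be
# passed through pool.map() arguments, so each worker receives the shared
# counter once, through the initializer, and stores it in a module global.
def _demo_init(counter):
    global _demo_counter
    _demo_counter = counter

def _demo_work(x):
    # Increment the shared counter under its built-in lock, then do the "work"
    with _demo_counter.get_lock():
        _demo_counter.value += 1
    return x * x

def _demo_pool_counter():
    counter = multiprocessing.Value('I', 0)
    pool = multiprocessing.Pool(2, initializer=_demo_init, initargs=(counter,))
    results = pool.map(_demo_work, range(10))
    print "Demo: processed", counter.value, "items:", results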
def processResults(params, taskresults):
    """
    Process the raw task results
    """
    deltas = params['deltas']
    rhos = params['rhos']
    algosN = params['algosN']
    algosL = params['algosL']
    lambdas = params['lambdas']

    meanmatrix = dict()
    elapsed = dict()
    for algo in algosN:
        meanmatrix[algo[1]] = numpy.zeros((rhos.size, deltas.size))
        elapsed[algo[1]] = 0
    for algo in algosL:
        meanmatrix[algo[1]] = numpy.zeros((lambdas.size, rhos.size, deltas.size))
        elapsed[algo[1]] = numpy.zeros(lambdas.size)

    # Process results
    idx = 0
    for idelta,delta in zip(numpy.arange(deltas.size),deltas):
        for irho,rho in zip(numpy.arange(rhos.size),rhos):
            mrelerrN,mrelerrL,addelapsed = taskresults[idx]
            idx = idx+1
            for algotuple in algosN:
                meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
                if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
                    meanmatrix[algotuple[1]][irho,idelta] = 0
                elapsed[algotuple[1]] = elapsed[algotuple[1]] + addelapsed[algotuple[1]]
            for algotuple in algosL:
                for ilbd in numpy.arange(lambdas.size):
                    meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
                    if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
                        meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
                    elapsed[algotuple[1]][ilbd] = elapsed[algotuple[1]][ilbd] + addelapsed[algotuple[1]][ilbd]

    procresults = dict()
    procresults['meanmatrix'] = meanmatrix
    procresults['elapsed'] = elapsed
    return procresults


def saveSim(params, procresults):
    """
    Save simulation to mat file
    """
    tosave = dict()
    tosave['meanmatrix'] = procresults['meanmatrix']
    tosave['elapsed'] = procresults['elapsed']
    tosave['d'] = params['d']
    tosave['sigma'] = params['sigma']
    tosave['deltas'] = params['deltas']
    tosave['rhos'] = params['rhos']
    tosave['numvects'] = params['numvects']
    tosave['SNRdb'] = params['SNRdb']
    tosave['lambdas'] = params['lambdas']
    tosave['saveplotbase'] = params['saveplotbase']
    tosave['saveplotexts'] = params['saveplotexts']

    # Save algo names as cell array
    obj_arr = numpy.zeros((len(params['algosN']),), dtype=numpy.object)
    idx = 0
    for algotuple in params['algosN']:
        obj_arr[idx] = algotuple[1]
        idx = idx+1
    tosave['algosNnames'] = obj_arr

    obj_arr = numpy.zeros((len(params['algosL']),), dtype=numpy.object)
    idx = 0
    for algotuple in params['algosL']:
        obj_arr[idx] = algotuple[1]
        idx = idx+1
    tosave['algosLnames'] = obj_arr

    try:
        scipy.io.savemat(params['savedataname'], tosave)
    except:
        print "Save error"


def loadSim(savedataname):
    """
    Load simulation from mat file
    """
    mdict = scipy.io.loadmat(savedataname)

    params = dict()
    procresults = dict()
    procresults['meanmatrix'] = mdict['meanmatrix'][0,0]
    procresults['elapsed'] = mdict['elapsed']
    params['d'] = mdict['d']
    params['sigma'] = mdict['sigma']
    params['deltas'] = mdict['deltas']
    params['rhos'] = mdict['rhos']
    params['numvects'] = mdict['numvects']
    params['SNRdb'] = mdict['SNRdb']
    params['lambdas'] = mdict['lambdas']
    params['saveplotbase'] = mdict['saveplotbase'][0]
    params['saveplotexts'] = mdict['saveplotexts']

    algonames = mdict['algosNnames'][:,0]
    params['algosNnames'] = []
    for algoname in algonames:
        params['algosNnames'].append(algoname[0])

    algonames = mdict['algosLnames'][:,0]
    params['algosLnames'] = []
    for algoname in algonames:
        params['algosLnames'].append(algoname[0])

    return params, procresults
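
# Illustrative sketch (not part of the original script): the object-array trick
# used by saveSim()/loadSim() above, in isolation. A numpy array with
# dtype=numpy.object holding strings is written by scipy.io.savemat() as a
# MATLAB cell array; loadmat() returns it as a 2-D object array of 1-element
# string arrays, hence the [:,0] and [0] indexing when reading back (the exact
# shape depends on scipy's oned_as layout). The name '_demo_cells.mat' and the
# algorithm names are hypothetical.
def _demo_cellarray_roundtrip():
    names = ['algo1', 'algo2', 'algo3']
    obj_arr = numpy.zeros((len(names),), dtype=numpy.object)
    for i, name in enumerate(names):
        obj_arr[i] = name
    scipy.io.savemat('_demo_cells.mat', {'names': obj_arr})
    loaded = scipy.io.loadmat('_demo_cells.mat')['names'][:,0]
    return [entry[0] for entry in loaded]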
def plot(savedataname):
    """
    Plot results from a mat file.
    The files are saved in the current folder.
    """
    params, procresults = loadSim(savedataname)

    meanmatrix = procresults['meanmatrix']
    saveplotbase = params['saveplotbase']
    saveplotexts = params['saveplotexts']
    algosNnames = params['algosNnames']
    algosLnames = params['algosLnames']
    lambdas = params['lambdas']

    for algoname in algosNnames:
        plt.figure()
        plt.imshow(meanmatrix[algoname], cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest', origin='lower')
        for ext in saveplotexts:
            plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight')

    for algoname in algosLnames:
        for ilbd in numpy.arange(lambdas.size):
            plt.figure()
            plt.imshow(meanmatrix[algoname][ilbd], cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest', origin='lower')
            for ext in saveplotexts:
                plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight')


#==========================
# Main function
#==========================
def run(params):
    """
    Run simulation with given parameters
    """
    print "This is analysis recovery ABS approximation script by Nic"
    print "Running simulation"

    algosN = params['algosN']
    algosL = params['algosL']
    d = params['d']
    sigma = params['sigma']
    deltas = params['deltas']
    rhos = params['rhos']
    lambdas = params['lambdas']
    numvects = params['numvects']
    SNRdb = params['SNRdb']
    ncpus = params['ncpus']
    savedataname = params['savedataname']

    if ncpus is None:
        print "  Running in parallel with default",multiprocessing.cpu_count(),"threads using \"multiprocessing\" package"
        if multiprocessing.cpu_count() == 1:
            doparallel = False
        else:
            doparallel = True
    elif ncpus > 1:
        print "  Running in parallel with",ncpus,"threads using \"multiprocessing\" package"
        doparallel = True
    elif ncpus == 1:
        print "Running single thread"
        doparallel = False
    else:
        print "Wrong number of threads, exiting"
        return

    # Print summary of parameters
    print "Parameters:"
    print "  Running algorithms",[algotuple[1] for algotuple in algosN],[algotuple[1] for algotuple in algosL]

    # Prepare parameters
    taskparams = generateTaskParams(params)

    # Store global variables
    currmodule.ntasks = len(taskparams)

    # Run
    taskresults = []
    if doparallel:
        currmodule.printLock = multiprocessing.Lock()
        pool = multiprocessing.Pool(ncpus, initializer=initProcess, initargs=(currmodule.proccount, currmodule.ntasks, currmodule.printLock))
        taskresults = pool.map(run_once_tuple, taskparams)
    else:
        for taskparam in taskparams:
            taskresults.append(run_once_tuple(taskparam))

    # Process results
    procresults = processResults(params, taskresults)

    # Save
    saveSim(params, procresults)
    print "Finished."


def run_once_tuple(t):
    """
    Wrapper for run_once() that explodes the tuple argument t
    and shows the number of finished / remaining tasks
    """
    # Call run_once() here
    results = run_once(*t)

    # Update and report the shared task counter (lock only exists when parallel)
    if currmodule.printLock:
        currmodule.printLock.acquire()
    currmodule.proccount.value = currmodule.proccount.value + 1
    print "================================"
    print "Finished task",currmodule.proccount.value,"of",currmodule.ntasks
    print "================================"
    if currmodule.printLock:
        currmodule.printLock.release()
    return results
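
# Illustrative sketch (not part of the original script): the constraint radius
# used inside run_once() below. The SNR in dB fixes the noise scale
# (noiselevel = 1/10^(SNRdb/10), as in generateTaskParams()), and each solver
# receives an epsilon slightly larger than the actual noise norm (factor 1.1),
# so that the true signal is likely to remain feasible for the
# noise-constrained programs.
def _demo_epsilon(SNRdb=20.0, m=100):
    noiselevel = 1.0 / (10.0**(SNRdb/10.0))
    noise = noiselevel * numpy.random.randn(m)
    return 1.1 * numpy.linalg.norm(noise)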
def run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
    """
    Run a single task (i.e. the task function)
    """
    d = Omega.shape[1]

    xrec = dict()
    err = dict()
    relerr = dict()
    elapsed = dict()

    # Prepare storage variables for non-Lambda algorithms
    for algo in algosN:
        xrec[algo[1]] = numpy.zeros((d, y.shape[1]))
        err[algo[1]] = numpy.zeros(y.shape[1])
        relerr[algo[1]] = numpy.zeros(y.shape[1])
        elapsed[algo[1]] = 0

    # Prepare storage variables for algorithms with Lambda
    for algo in algosL:
        xrec[algo[1]] = numpy.zeros((lambdas.size, d, y.shape[1]))
        err[algo[1]] = numpy.zeros((lambdas.size, y.shape[1]))
        relerr[algo[1]] = numpy.zeros((lambdas.size, y.shape[1]))
        elapsed[algo[1]] = numpy.zeros(lambdas.size)

    # Run non-Lambda algorithms
    for iy in numpy.arange(y.shape[1]):
        for algofunc,strname in algosN:
            epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
            try:
                timestart = time.time()
                xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
                elapsed[strname] = elapsed[strname] + (time.time() - timestart)
            except pyCSalgos.BP.l1qec.l1qecInputValueError as e:
                print "Caught exception when running algorithm",strname," :",e.message
            except pyCSalgos.NESTA.NESTA.NestaError as e:
                print "Caught exception when running algorithm",strname," :",e.message
            err[strname][iy] = numpy.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
            relerr[strname][iy] = err[strname][iy] / numpy.linalg.norm(x0[:,iy])
    for algofunc,strname in algosN:
        print strname,' : avg relative error = ',numpy.mean(relerr[strname])

    # Run algorithms with Lambda
    for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas):
        for iy in numpy.arange(y.shape[1]):
            D = numpy.linalg.pinv(Omega)
            #U,S,Vt = numpy.linalg.svd(D)
            for algofunc,strname in algosL:
                epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
                try:
                    timestart = time.time()
                    #gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
                    gamma = algofunc(y[:,iy],M,Omega,epsilon,lbd)
                    elapsed[strname][ilbd] = elapsed[strname][ilbd] + (time.time() - timestart)
                except pyCSalgos.BP.l1qc.l1qcInputValueError as e:
                    print "Caught exception when running algorithm",strname," :",e.message
                xrec[strname][ilbd,:,iy] = numpy.dot(D,gamma)
                err[strname][ilbd,iy] = numpy.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
                relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / numpy.linalg.norm(x0[:,iy])
        print 'Lambda = ',lbd,' :'
        for algofunc,strname in algosL:
            print '  ',strname,' : avg relative error = ',numpy.mean(relerr[strname][ilbd,:])

    # Prepare results
    mrelerrN = dict()
    for algotuple in algosN:
        mrelerrN[algotuple[1]] = numpy.mean(relerr[algotuple[1]])
    mrelerrL = dict()
    for algotuple in algosL:
        mrelerrL[algotuple[1]] = numpy.mean(relerr[algotuple[1]],1)

    return mrelerrN,mrelerrL,elapsed


def generateFig():
    """
    Generate figures. The figures are saved in the current folder.
    """
    run(stdparams_approx.std1)
    plot(stdparams_approx.std1['savedataname'])


# Script main
if __name__ == "__main__":
    generateFig()

    # Run test parameters
    #stdparams_approx.paramstest['ncpus'] = 1
    #run(stdparams_approx.paramstest)
    #plot(stdparams_approx.paramstest['savedataname'])
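
# Illustrative sketch (not part of the original script): the calling convention
# run_once() expects. Algorithms are passed as (function, name) tuples, and a
# non-Lambda solver takes (y, M, Omega, epsilon). The least-squares stand-in
# below is hypothetical, only to show the interface, on random noise-free toy
# data.
def _demo_algo(y, M, Omega, epsilon):
    # Minimum-energy "reconstruction": x = pinv(M) * y (ignores Omega, epsilon)
    return numpy.dot(numpy.linalg.pinv(M), y)

def _demo_run_once():
    d, p, m, numvects = 20, 24, 10, 5
    Omega = numpy.random.randn(p, d)
    M = numpy.random.randn(m, d)
    x0 = numpy.random.randn(d, numvects)
    y = numpy.dot(M, x0)
    realnoise = numpy.zeros((m, numvects))
    mrelerrN, mrelerrL, elapsed = run_once([(_demo_algo, 'demo')], [],
                                           Omega, y, numpy.array([]),
                                           realnoise, M, x0)
    print "Demo: avg relative error =", mrelerrN['demo']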