test_approx.py @ 19:2837cfeaf353
Fixed plot functions in utils.py
Started working on test_approx.py
| author   | Nic Cleju <nikcleju@gmail.com>  |
|----------|---------------------------------|
| date     | Thu, 05 Apr 2012 13:59:22 +0300 |
| parents  | 4a967f4f18a0                    |
| children | d395461b92ae                    |

# -*- coding: utf-8 -*-
"""
Main script for approximate reconstruction tests.

Author: Nicolae Cleju
"""
__author__ = "Nicolae Cleju"
__license__ = "GPL"
__email__ = "nikcleju@gmail.com"


import numpy
import scipy.io
import math
import os
import time
import multiprocessing
import sys

# Try to do smart importing of matplotlib
try:
    import matplotlib
    if os.name == 'nt':
        print "Importing matplotlib with default (GUI) backend... "
    else:
        print "Importing matplotlib with \"Cairo\" backend... "
        matplotlib.use('Cairo')
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    import matplotlib.colors as mcolors
except:
    print "FAIL"
    print "Importing matplotlib.pyplot failed. No figures at all"
    print "Try selecting a different backend"

currmodule = sys.modules[__name__]

printLock = None  # Lock for printing in a thread-safe way

# Thread-safe variable to store number of finished tasks
proccount = multiprocessing.Value('I', 0)  # 'I' = unsigned int, see docs (multiprocessing, array)

# Contains pre-defined simulation parameters
import stdparams_approx

# Analysis operator and data generation functions
import AnalysisGenerate

# For exceptions
import pyCSalgos.BP.l1qec
import pyCSalgos.BP.l1qc
import pyCSalgos.NESTA.NESTA

# For plotting with right axes
import utils


def initProcess(share, ntasks, printLock):
    """
    Pool initializer function (multiprocessing).
    Needed to pass the shared variables to the worker processes.
    The variables must be global in the module in order to be seen later in run_once_tuple(),
    see http://stackoverflow.com/questions/1675766/how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing
    """
    currmodule = sys.modules[__name__]
    currmodule.proccount = share
    currmodule.ntasks = ntasks
    currmodule.printLock = printLock   # lock used by run_once_tuple() for thread-safe printing
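
# --- Illustrative sketch (not part of the original script) ---------------------
# A minimal, hypothetical example of the initializer pattern above: a shared
# counter and a lock are handed to initProcess(), and a worker then updates the
# counter under the lock the way run_once_tuple() does further below.
def _demo_shared_counter():
    share = multiprocessing.Value('I', 0)    # shared unsigned int counter
    lock = multiprocessing.Lock()
    initProcess(share, 10, lock)             # 10 = hypothetical number of tasks
    # Thread-safe increment, mirroring run_once_tuple():
    currmodule.printLock.acquire()
    currmodule.proccount.value = currmodule.proccount.value + 1
    currmodule.printLock.release()
    return currmodule.proccount.value        # -> 1
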
def generateTaskParams(globalparams):
    """
    Generate a list of task parameters (for parallel running)
    """
    taskparams = []

    SNRdb = globalparams['SNRdb']
    sigma = globalparams['sigma']
    d = globalparams['d']
    deltas = globalparams['deltas']
    rhos = globalparams['rhos']
    numvects = globalparams['numvects']
    algosN = globalparams['algosN']
    algosL = globalparams['algosL']
    lambdas = globalparams['lambdas']

    # Process parameters
    noiselevel = 1.0 / (10.0**(SNRdb/10.0));

    for delta in deltas:
        for rho in rhos:
            p = round(sigma*d);
            m = round(delta*d);
            l = round(d - rho*m);

            # Generate Omega and data based on parameters
            Omega = AnalysisGenerate.Generate_Analysis_Operator(d, p);

            # Optionally make Omega more coherent
            #U,S,Vt = numpy.linalg.svd(Omega);
            #Sdnew = S * (1+numpy.arange(S.size)) # Make D coherent, not Omega!
            #Snew = numpy.vstack((numpy.diag(Sdnew), numpy.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
            #Omega = numpy.dot(U , numpy.dot(Snew,Vt))

            # Generate data
            x0,y,M,Lambda,realnoise = AnalysisGenerate.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0')

            # Append task params
            taskparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))

    return taskparams


def processResults(params, taskparams, taskresults):
    """
    Process the raw task results
    """
    deltas = params['deltas']
    rhos = params['rhos']
    algosN = params['algosN']
    algosL = params['algosL']
    lambdas = params['lambdas']
    numvects = params['numvects']

    err = dict()
    relerr = dict()
    mrelerrN = dict()
    mrelerrL = dict()
    meanmatrix = dict()
    elapsed = dict()

    # Prepare storage variables for algorithms non-Lambda
    for algo in algosN:
        err[algo[1]] = numpy.zeros(numvects)
        relerr[algo[1]] = numpy.zeros(numvects)
        meanmatrix[algo[1]] = numpy.zeros((rhos.size, deltas.size))
        elapsed[algo[1]] = 0
    # Prepare storage variables for algorithms with Lambda
    for algo in algosL:
        err[algo[1]] = numpy.zeros((lambdas.size, numvects))
        relerr[algo[1]] = numpy.zeros((lambdas.size, numvects))
        meanmatrix[algo[1]] = numpy.zeros((lambdas.size, rhos.size, deltas.size))
        elapsed[algo[1]] = numpy.zeros(lambdas.size)

    # Process results
    idx = 0
    for idelta,delta in zip(numpy.arange(deltas.size),deltas):
        for irho,rho in zip(numpy.arange(rhos.size),rhos):

            algosN,algosL,Omega,y,lambdas,realnoise,M,x0 = taskparams[idx]
            xrec,addelapsed = taskresults[idx]
            idx = idx+1

            # Optionally compare not with original signal x0, but with solution of
            # another algorithm (e.g. GAP, NESTA etc)
            if 'reference_signal' in params:
                xref = xrec[params['reference_signal']]
            else:
                xref = x0

            # Compute errors and relative errors
            for iy in numpy.arange(y.shape[1]):
                for algofunc,algoname in algosN:
                    err[algoname][iy] = numpy.linalg.norm(xref[:,iy] - xrec[algoname][:,iy])
                    relerr[algoname][iy] = err[algoname][iy] / numpy.linalg.norm(xref[:,iy])
            for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas):
                for iy in numpy.arange(y.shape[1]):
                    for algofunc,algoname in algosL:
                        err[algoname][ilbd,iy] = numpy.linalg.norm(xref[:,iy] - xrec[algoname][ilbd,:,iy])
                        relerr[algoname][ilbd,iy] = err[algoname][ilbd,iy] / numpy.linalg.norm(xref[:,iy])

            # Compute average relative errors and prepare matrix to plot
            for algofunc,algoname in algosN:
                mrelerrN[algoname] = numpy.mean(relerr[algoname])
                meanmatrix[algoname][irho,idelta] = 1 - mrelerrN[algoname]
                if meanmatrix[algoname][irho,idelta] < 0 or math.isnan(meanmatrix[algoname][irho,idelta]):
                    meanmatrix[algoname][irho,idelta] = 0
                elapsed[algoname] = elapsed[algoname] + addelapsed[algoname]
            for algofunc, algoname in algosL:
                for ilbd in numpy.arange(lambdas.size):
                    mrelerrL[algoname] = numpy.mean(relerr[algoname],1)
                    meanmatrix[algoname][ilbd,irho,idelta] = 1 - mrelerrL[algoname][ilbd]
                    if meanmatrix[algoname][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algoname][ilbd,irho,idelta]):
                        meanmatrix[algoname][ilbd,irho,idelta] = 0
                    elapsed[algoname][ilbd] = elapsed[algoname][ilbd] + addelapsed[algoname][ilbd]

    procresults = dict()
    procresults['meanmatrix'] = meanmatrix
    procresults['elapsed'] = elapsed
    return procresults
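
# --- Illustrative sketch (not part of the original script) ---------------------
# How a single cell of `meanmatrix` is obtained in processResults() for one
# non-Lambda algorithm: 1 minus the mean relative error, with negative or NaN
# results set to 0. `xref` and `xrec` are hypothetical d x numvects arrays
# (reference signals and recovered signals).
def _demo_cell_score(xref, xrec):
    relerr = numpy.zeros(xref.shape[1])
    for iy in numpy.arange(xref.shape[1]):
        relerr[iy] = numpy.linalg.norm(xref[:,iy] - xrec[:,iy]) / numpy.linalg.norm(xref[:,iy])
    score = 1 - numpy.mean(relerr)
    if score < 0 or math.isnan(score):
        score = 0
    return score
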
def saveSim(params, procresults):
    """
    Save simulation to mat file
    """
    tosave = dict()
    tosave['meanmatrix'] = procresults['meanmatrix']
    tosave['elapsed'] = procresults['elapsed']
    tosave['d'] = params['d']
    tosave['sigma'] = params['sigma']
    tosave['deltas'] = params['deltas']
    tosave['rhos'] = params['rhos']
    tosave['numvects'] = params['numvects']
    tosave['SNRdb'] = params['SNRdb']
    tosave['lambdas'] = params['lambdas']
    tosave['saveplotbase'] = params['saveplotbase']
    tosave['saveplotexts'] = params['saveplotexts']

    # Save algo names as cell array
    obj_arr = numpy.zeros((len(params['algosN']),), dtype=numpy.object)
    idx = 0
    for algotuple in params['algosN']:
        obj_arr[idx] = algotuple[1]
        idx = idx+1
    tosave['algosNnames'] = obj_arr

    obj_arr = numpy.zeros((len(params['algosL']),), dtype=numpy.object)
    idx = 0
    for algotuple in params['algosL']:
        obj_arr[idx] = algotuple[1]
        idx = idx+1
    tosave['algosLnames'] = obj_arr

    try:
        scipy.io.savemat(params['savedataname'], tosave)
    except:
        print "Save error"


def loadSim(savedataname):
    """
    Load simulation from mat file
    """
    mdict = scipy.io.loadmat(savedataname)

    params = dict()
    procresults = dict()
    procresults['meanmatrix'] = mdict['meanmatrix'][0,0]
    procresults['elapsed'] = mdict['elapsed']
    params['d'] = mdict['d']
    params['sigma'] = mdict['sigma']
    params['deltas'] = mdict['deltas']
    params['rhos'] = mdict['rhos']
    params['numvects'] = mdict['numvects']
    params['SNRdb'] = mdict['SNRdb']
    params['lambdas'] = mdict['lambdas']
    params['saveplotbase'] = mdict['saveplotbase'][0]
    params['saveplotexts'] = mdict['saveplotexts']

    algonames = mdict['algosNnames'][:,0]
    params['algosNnames'] = []
    for algoname in algonames:
        params['algosNnames'].append(algoname[0])

    algonames = mdict['algosLnames'][:,0]
    params['algosLnames'] = []
    for algoname in algonames:
        params['algosLnames'].append(algoname[0])

    return params, procresults


def plot(savedataname):
    """
    Plot results from a mat file.
    The files are saved in the current folder.
    """
    params, procresults = loadSim(savedataname)

    meanmatrix = procresults['meanmatrix']
    saveplotbase = params['saveplotbase']
    saveplotexts = params['saveplotexts']
    algosNnames = params['algosNnames']
    algosLnames = params['algosLnames']
    lambdas = params['lambdas']

    for algoname in algosNnames:
        plt.figure()
        plt.imshow(meanmatrix[algoname], cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest',origin='lower')
        for ext in saveplotexts:
            plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight')

    for algoname in algosLnames:
        for ilbd in numpy.arange(lambdas.size):
            plt.figure()
            plt.imshow(meanmatrix[algoname][ilbd], cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest',origin='lower')
            for ext in saveplotexts:
                plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight')
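
# --- Illustrative sketch (not part of the original script) ---------------------
# Minimal use of loadSim(): print the algorithm names and grid size stored in a
# results .mat file produced by saveSim(). `savedataname` is whatever file name
# was used in the simulation parameters.
def _demo_inspect_saved(savedataname):
    params, procresults = loadSim(savedataname)
    print "Algorithms (no lambda):  ", params['algosNnames']
    print "Algorithms (with lambda):", params['algosLnames']
    print "Grid size:", params['rhos'].size, "rhos x", params['deltas'].size, "deltas"
    return procresults['meanmatrix']
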
def plotProveEll1(savedataname):
    """
    Plot the results for proving Theorem IV.2 in the ell_1 case.
    The files are saved in the current folder.
    """
    params, procresults = loadSim(savedataname)

    meanmatrix = procresults['meanmatrix']
    saveplotbase = params['saveplotbase']
    saveplotexts = params['saveplotexts']
    algosNnames = params['algosNnames']
    algosLnames = params['algosLnames']
    lambdas = params['lambdas']

    toplot = numpy.zeros((len(lambdas),len(algosNnames) + len(algosLnames)))
    idxcol = 0
    for algoname in algosNnames:
        toplot[:,idxcol] = (1 - meanmatrix[algoname][0,0]) * numpy.ones(len(lambdas))
        idxcol = idxcol + 1
    for algoname in algosLnames:
        for ilbd in numpy.arange(len(lambdas)):
            toplot[ilbd,idxcol] = 1 - meanmatrix[algoname][ilbd][0,0]
        idxcol = idxcol + 1

    plt.figure()
    plt.plot(toplot)
    for ext in saveplotexts:
        plt.savefig(saveplotbase + '.' + ext, bbox_inches='tight')


#==========================
# Main function
#==========================
def run(params):
    """
    Run simulation with given parameters
    """
    print "This is analysis recovery ABS approximation script by Nic"
    print "Running simulation"

    algosN = params['algosN']
    algosL = params['algosL']
    d = params['d']
    sigma = params['sigma']
    deltas = params['deltas']
    rhos = params['rhos']
    lambdas = params['lambdas']
    numvects = params['numvects']
    SNRdb = params['SNRdb']
    if 'ncpus' in params:
        ncpus = params['ncpus']
    else:
        ncpus = None
    savedataname = params['savedataname']

    if ncpus is None:
        print " Running in parallel with default",multiprocessing.cpu_count(),"threads using \"multiprocessing\" package"
        if multiprocessing.cpu_count() == 1:
            doparallel = False
        else:
            doparallel = True
    elif ncpus > 1:
        print " Running in parallel with",ncpus,"threads using \"multiprocessing\" package"
        doparallel = True
    elif ncpus == 1:
        print "Running single thread"
        doparallel = False
    else:
        print "Wrong number of threads, exiting"
        return

    # Print summary of parameters
    print "Parameters:"
    print " Running algorithms",[algotuple[1] for algotuple in algosN],[algotuple[1] for algotuple in algosL]

    # Prepare parameters
    print "Generating task parameters..."
    taskparams = generateTaskParams(params)

    # Store global variables
    currmodule.ntasks = len(taskparams)

    # Run
    print "Running..."
    taskresults = []
    if doparallel:
        currmodule.printLock = multiprocessing.Lock()
        pool = multiprocessing.Pool(ncpus,initializer=initProcess,initargs=(currmodule.proccount,currmodule.ntasks,currmodule.printLock))
        taskresults = pool.map(run_once_tuple, taskparams)
    else:
        for taskparam in taskparams:
            taskresults.append(run_once_tuple(taskparam))

    # Process results
    procresults = processResults(params, taskparams, taskresults)

    # Save
    saveSim(params, procresults)

    print "Finished."
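
# --- Illustrative sketch (not part of the original script) ---------------------
# The dictionary keys that run() and saveSim() read, inferred from the code
# above. All values below are hypothetical placeholders; the real parameter sets
# live in stdparams_approx. Solver entries are (function, name) tuples with the
# call signatures used in run_once().
def _demo_params():
    def my_solver(y, M, Omega, epsilon):     # placeholder non-Lambda solver
        return numpy.zeros(Omega.shape[1])
    return {
        'algosN': [(my_solver, 'dummy')],    # solvers called as f(y, M, Omega, epsilon)
        'algosL': [],                        # solvers called as f(y, M, Omega, epsilon, lbd)
        'd': 50,
        'sigma': 2.0,
        'deltas': numpy.array([0.5]),
        'rhos': numpy.array([0.5]),
        'lambdas': numpy.array([1.0]),
        'numvects': 10,
        'SNRdb': 20.0,
        'savedataname': 'demo.mat',
        'saveplotbase': 'demo_',
        'saveplotexts': ['png'],
        'ncpus': 1,                          # single thread, so the nested solver need not be pickled
    }
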
def run_once_tuple(t):
    """
    Wrapper for run_once() that explodes the tuple argument t and shows
    the number of finished / remaining tasks
    """
    # Call run_once() here
    results = run_once(*t)

    if currmodule.printLock:
        currmodule.printLock.acquire()
        currmodule.proccount.value = currmodule.proccount.value + 1
        print "================================"
        print "Finished task",currmodule.proccount.value,"of",currmodule.ntasks
        print "================================"
        currmodule.printLock.release()

    return results


def run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
    """
    Run a single task (i.e. the task function)
    """
    d = Omega.shape[1]

    xrec = dict()
    # err = dict()
    # relerr = dict()
    elapsed = dict()

    # Prepare storage variables for algorithms non-Lambda
    for algo in algosN:
        xrec[algo[1]] = numpy.zeros((d, y.shape[1]))
        elapsed[algo[1]] = 0
    # Prepare storage variables for algorithms with Lambda
    for algo in algosL:
        xrec[algo[1]] = numpy.zeros((lambdas.size, d, y.shape[1]))
        elapsed[algo[1]] = numpy.zeros(lambdas.size)

    # Run algorithms non-Lambda
    for iy in numpy.arange(y.shape[1]):
        for algofunc,strname in algosN:
            epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
            try:
                timestart = time.time()
                xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
                elapsed[strname] = elapsed[strname] + (time.time() - timestart)
            except pyCSalgos.BP.l1qec.l1qecInputValueError as e:
                print "Caught exception when running algorithm",strname," :",e.message
            except pyCSalgos.NESTA.NESTA.NestaError as e:
                print "Caught exception when running algorithm",strname," :",e.message

    # Run algorithms with Lambda
    for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas):
        for iy in numpy.arange(y.shape[1]):
            D = numpy.linalg.pinv(Omega)
            #U,S,Vt = numpy.linalg.svd(D)
            for algofunc,strname in algosL:
                epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
                try:
                    timestart = time.time()
                    #gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
                    xrec[strname][ilbd][:,iy] = algofunc(y[:,iy],M,Omega,epsilon,lbd)
                    elapsed[strname][ilbd] = elapsed[strname][ilbd] + (time.time() - timestart)
                except pyCSalgos.BP.l1qc.l1qcInputValueError as e:
                    print "Caught exception when running algorithm",strname," :",e.message

    return xrec, elapsed


def generateFigProveEll1():
    """
    Generates figure xxx (prove theorem IV.2 in the ell_1 case).
    Figures are saved in the current folder.
    """
    #paramsl1prove['reference_signal'] = nesta[1] # 'NESTA'
    run(stdparams_approx.paramsl1prove)
    plotProveEll1(stdparams_approx.paramsl1prove['savedataname'])


def generateFig():
    """
    Generates figures. Figures are saved in the current folder.
    """
    # Note: uses the stdparams_exact module, which is not imported in this script
    run(stdparams_exact.std1)
    plot(stdparams_exact.std1['savedataname'])


# Script main
if __name__ == "__main__":
    #stdparams_approx.paramsl1prove['ncpus'] = 1
    #generateFigProveEll1()
    #generateFig()

    # Run test parameters
    #stdparams_approx.paramstest['ncpus'] = 1
    #run(stdparams_approx.paramstest)
    #plot(stdparams_approx.paramstest['savedataname'])
    utils.replot_approx(stdparams_approx.paramstest['savedataname'],
                        algonames = None,  # will read them from mat file
                        doshow=False,
                        dosave=True,
                        saveplotbase=stdparams_approx.paramstest['saveplotbase'],
                        saveplotexts=stdparams_approx.paramstest['saveplotexts'])
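
# --- Illustrative usage note (not part of the original script) -----------------
# A typical interactive session mirrors the commented lines in the main block
# above: run the test parameter set single-threaded, then plot from the saved
# .mat file.
#
#   import test_approx, stdparams_approx
#   stdparams_approx.paramstest['ncpus'] = 1
#   test_approx.run(stdparams_approx.paramstest)
#   test_approx.plot(stdparams_approx.paramstest['savedataname'])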