pycsalgos (Mercurial repository)
changeset 56:de6299dcb49e
Changed directory structure - part 5
author   | nikcleju
date     | Wed, 14 Dec 2011 14:51:35 +0000
parents  | 020399d027b1
children | 6e51880715f3
files    | apps/omp_app.py scripts/ABSapprox.py scripts/ABSapproxMultiproc.py scripts/ABSapproxPP.py scripts/ABSapproxSingle.py scripts/algos.py scripts/stdparams.py scripts/study_analysis_rec_algos_noisy.m scripts/study_analysis_rec_approx.m scripts/utils.py
diffstat | 10 files changed, 0 insertions(+), 2651 deletions(-)
--- a/apps/omp_app.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,149 +0,0 @@ -""" -#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=# -# Bob L. Sturm <bst@create.aau.dk> 20111018 -# Department of Architecture, Design and Media Technology -# Aalborg University Copenhagen -# Lautrupvang 15, 2750 Ballerup, Denmark -#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=# -""" - -import numpy as np -from sklearn.utils import check_random_state -import time - -from omp_sk_bugfix import orthogonal_mp -from omp_QR import greed_omp_qr -from omp_QR import omp_qr - -""" -Run a problem suite involving sparse vectors in -ambientDimension dimensional space, with a resolution -in the phase plane of numGradations x numGradations, -and at each indeterminacy and sparsity pair run -numTrials independent trials. - -Outputs a text file denoting successes at each phase point. -For more on phase transitions, see: -D. L. Donoho and J. Tanner, "Precise undersampling theorems," -Proc. IEEE, vol. 98, no. 6, pp. 913-924, June 2010. -""" - -def runProblemSuite(ambientDimension,numGradations,numTrials): - - idx = np.arange(ambientDimension) - phaseDelta = np.linspace(0.05,1,numGradations) - phaseRho = np.linspace(0.05,1,numGradations) - success = np.zeros((numGradations, numGradations)) - - #Nic: init timers - t1all = 0 - t2all = 0 - t3all = 0 - - deltaCounter = 0 - # delta is number of measurements/ - for delta in phaseDelta[:17]: - rhoCounter = 0 - for rho in phaseRho: - print(deltaCounter,rhoCounter) - numMeasurements = int(delta*ambientDimension) - sparsity = int(rho*numMeasurements) - # how do I set the following to be random each time? - generator = check_random_state(100) - # create unit norm dictionary - D = generator.randn(numMeasurements, ambientDimension) - D /= np.sqrt(np.sum((D ** 2), axis=0)) - # compute Gramian (for efficiency) - DTD = np.dot(D.T,D) - - successCounter = 0 - trial = numTrials - while trial > 0: - # generate sparse signal with a minimum non-zero value - x = np.zeros((ambientDimension, 1)) - idx2 = idx - generator.shuffle(idx2) - idx3 = idx2[:sparsity] - while np.min(np.abs(x[idx3,0])) < 1e-10 : - x[idx3,0] = generator.randn(sparsity) - # sense sparse signal - y = np.dot(D, x) - - # Nic: Use sparsify OMP function (translated from Matlab) - ompopts = dict({'stopCrit':'M', 'stopTol':2*sparsity}) - starttime = time.time() # start timer - x_r2, errs, times = greed_omp_qr(y.squeeze().copy(), D.copy(), D.shape[1], ompopts) - t2all = t2all + time.time() - starttime # stop timer - idx_r2 = np.nonzero(x_r2)[0] - - # run to two times expected sparsity, or tolerance - # why? 
Often times, OMP can retrieve the correct solution - # when it is run for more than the expected sparsity - #x_r, idx_r = omp_qr(y,D,DTD,2*sparsity,1e-5) - # Nic: adjust tolerance to match with other function - starttime = time.time() # start timer - x_r, idx_r = omp_qr(y.copy(),D.copy(),DTD.copy(),2*sparsity,numMeasurements*1e-14/np.vdot(y,y)) - t1all = t1all + time.time() - starttime # stop timer - - # Nic: test sklearn omp - starttime = time.time() # start timer - x_r3 = orthogonal_mp(D.copy(), y.copy(), n_nonzero_coefs=2*sparsity, tol=numMeasurements*1e-14, precompute_gram=False, copy_X=True) - idx_r3 = np.nonzero(x_r3)[0] - t3all = t3all + time.time() - starttime # stop timer - - # Nic: compare results - print 'diff1 = ',np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) - print 'diff2 = ',np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) - print 'diff3 = ',np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) - print "Bob's total time = ", t1all - print "Nic's total time = ", t2all - print "Skl's total time = ", t3all - if np.linalg.norm(x_r.squeeze() - x_r2.squeeze()) > 1e-10 or \ - np.linalg.norm(x_r.squeeze() - x_r3.squeeze()) > 1e-10 or \ - np.linalg.norm(x_r2.squeeze() - x_r3.squeeze()) > 1e-10: - print "STOP: Different results" - print "Bob's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r).squeeze()) - print "Nic's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r2).squeeze()) - print "Skl's residual: ||y - D x_r ||_2 = ",np.linalg.norm(y.squeeze() - np.dot(D,x_r3).squeeze()) - raise ValueError("Different results") - - # debais to remove small entries - for nn in idx_r: - if abs(x_r[nn]) < 1e-10: - x_r[nn] = 0 - - # exact recovery condition using support - #if sorted(np.flatnonzero(x_r)) == sorted(np.flatnonzero(x)): - # successCounter += 1 - # exact recovery condition using error in solution - error = x - x_r - """ the following is the exact recovery condition in: A. Maleki - and D. L. Donoho, "Optimally tuned iterative reconstruction - algorithms for compressed sensing," IEEE J. Selected Topics - in Signal Process., vol. 4, pp. 330-341, Apr. 2010. """ - if np.vdot(error,error) < np.vdot(x,x)*1e-4: - successCounter += 1 - trial -= 1 - - success[rhoCounter,deltaCounter] = successCounter - if successCounter == 0: - break - - rhoCounter += 1 - #np.savetxt('test.txt',success,fmt='#2.1d',delimiter=',') - deltaCounter += 1 - -if __name__ == '__main__': - print ('Running problem suite') - ambientDimension = 400 - numGradations = 30 - numTrials = 1 - - #import cProfile - #cProfile.run('runProblemSuite(ambientDimension,numGradations,numTrials)','profres') - runProblemSuite(ambientDimension,numGradations,numTrials) - print "Done" - - #import pstats - #p = pstats.Stats('D:\Nic\Dev2\profres') - #p.sort_stats('cumulative').print_stats(10)
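Note: the deleted apps/omp_app.py benchmarks three OMP implementations (Bob Sturm's omp_qr, the sparsify-style greed_omp_qr, and scikit-learn's orthogonal_mp) over a Donoho-Tanner phase-transition grid, and counts a trial as recovered with the Maleki-Donoho criterion ||x - x_r||^2 < 1e-4 ||x||^2. The following is a minimal, self-contained Python 3 sketch of just that success test; the function and variable names are illustrative and not part of the repository:

    import numpy as np

    def recovery_success(x_true, x_rec, tol=1e-4):
        # Maleki-Donoho exact-recovery test: relative squared error below tol
        error = x_true - x_rec
        return np.vdot(error, error) < tol * np.vdot(x_true, x_true)

    x = np.zeros(10)
    x[[1, 4]] = [1.0, -2.0]
    x_hat = x + 1e-4 * np.random.randn(10)
    print(recovery_success(x, x_hat))   # True for a near-exact reconstruction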
--- a/scripts/ABSapprox.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,342 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Sat Nov 05 18:08:40 2011 - -@author: Nic -""" - -import numpy as np -import scipy.io -import math -import os -import time - -import stdparams -import pyCSalgos.Analysis - -#========================== -# Pool initializer function (multiprocessing) -# Needed to pass the shared variable to the worker processes -# The variables must be global in the module in order to be seen later in run_once_tuple() -# see http://stackoverflow.com/questions/1675766/how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing -#========================== -def initProcess(share, njobs): - import sys - currmodule = sys.modules[__name__] - currmodule.proccount = share - currmodule.njobs = njobs - -#========================== -# Interface run functions -#========================== -def run_mp(std=stdparams.std2,ncpus=None): - - algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,doshowplot,dosaveplot,saveplotbase,saveplotexts = std() - run_multi(algosN, algosL, d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata=dosavedata,savedataname=savedataname,\ - doparallel=True, ncpus=ncpus,\ - doshowplot=doshowplot,dosaveplot=dosaveplot,saveplotbase=saveplotbase,saveplotexts=saveplotexts) - -def run(std=stdparams.std2): - algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,doshowplot,dosaveplot,saveplotbase,saveplotexts = std() - run_multi(algosN, algosL, d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata=dosavedata,savedataname=savedataname,\ - doparallel=False,\ - doshowplot=doshowplot,dosaveplot=dosaveplot,saveplotbase=saveplotbase,saveplotexts=saveplotexts) -#========================== -# Main functions -#========================== -def run_multi(algosN, algosL, d, sigma, deltas, rhos, lambdas, numvects, SNRdb, - doparallel=False, ncpus=None,\ - doshowplot=False, dosaveplot=False, saveplotbase=None, saveplotexts=None,\ - dosavedata=False, savedataname=None): - - print "This is analysis recovery ABS approximation script by Nic" - print "Running phase transition ( run_multi() )" - - # Not only for parallel - #if doparallel: - import multiprocessing - # Shared value holding the number of finished processes - # Add it as global of the module - import sys - currmodule = sys.modules[__name__] - currmodule.proccount = multiprocessing.Value('I', 0) # 'I' = unsigned int, see docs (multiprocessing, array) - - if dosaveplot or doshowplot: - try: - import matplotlib - if doshowplot or os.name == 'nt': - print "Importing matplotlib with default (GUI) backend... ", - else: - print "Importing matplotlib with \"Cairo\" backend... ", - matplotlib.use('Cairo') - import matplotlib.pyplot as plt - import matplotlib.cm as cm - print "OK" - except: - print "FAIL" - print "Importing matplotlib.pyplot failed. 
No figures at all" - print "Try selecting a different backend" - doshowplot = False - dosaveplot = False - - # Print summary of parameters - print "Parameters:" - if doparallel: - if ncpus is None: - print " Running in parallel with default threads using \"multiprocessing\" package" - else: - print " Running in parallel with",ncpus,"threads using \"multiprocessing\" package" - else: - print "Running single thread" - if doshowplot: - print " Showing figures" - else: - print " Not showing figures" - if dosaveplot: - print " Saving figures as "+saveplotbase+"* with extensions ",saveplotexts - else: - print " Not saving figures" - print " Running algorithms",[algotuple[1] for algotuple in algosN],[algotuple[1] for algotuple in algosL] - - nalgosN = len(algosN) - nalgosL = len(algosL) - - meanmatrix = dict() - elapsed = dict() - for i,algo in zip(np.arange(nalgosN),algosN): - meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) - elapsed[algo[1]] = 0 - for i,algo in zip(np.arange(nalgosL),algosL): - meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) - elapsed[algo[1]] = np.zeros(lambdas.size) - - # Prepare parameters - jobparams = [] - print " (delta, rho) pairs to be run:" - for idelta,delta in zip(np.arange(deltas.size),deltas): - for irho,rho in zip(np.arange(rhos.size),rhos): - - # Generate data and operator - Omega,x0,y,M,realnoise = generateData(d,sigma,delta,rho,numvects,SNRdb) - - #Save the parameters, and run after - print " delta = ",delta," rho = ",rho - jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0)) - - # Not only for parallel - #if doparallel: - currmodule.njobs = deltas.size * rhos.size - print "End of parameters" - - # Run - jobresults = [] - - if doparallel: - pool = multiprocessing.Pool(4,initializer=initProcess,initargs=(currmodule.proccount,currmodule.njobs)) - jobresults = pool.map(run_once_tuple, jobparams) - else: - for jobparam in jobparams: - jobresults.append(run_once_tuple(jobparam)) - - # Read results - idx = 0 - for idelta,delta in zip(np.arange(deltas.size),deltas): - for irho,rho in zip(np.arange(rhos.size),rhos): - mrelerrN,mrelerrL,addelapsed = jobresults[idx] - idx = idx+1 - for algotuple in algosN: - meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] - if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): - meanmatrix[algotuple[1]][irho,idelta] = 0 - elapsed[algotuple[1]] = elapsed[algotuple[1]] + addelapsed[algotuple[1]] - for algotuple in algosL: - for ilbd in np.arange(lambdas.size): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] - if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 - elapsed[algotuple[1]][ilbd] = elapsed[algotuple[1]][ilbd] + addelapsed[algotuple[1]][ilbd] - - # Save - if dosavedata: - tosave = dict() - tosave['meanmatrix'] = meanmatrix - tosave['elapsed'] = elapsed - tosave['d'] = d - tosave['sigma'] = sigma - tosave['deltas'] = deltas - tosave['rhos'] = rhos - tosave['numvects'] = numvects - tosave['SNRdb'] = SNRdb - tosave['lambdas'] = lambdas - # Save algo names as cell array - obj_arr = np.zeros((len(algosN)+len(algosL),), dtype=np.object) - idx = 0 - for algotuple in algosN+algosL: - obj_arr[idx] = algotuple[1] - idx = idx+1 - tosave['algonames'] = obj_arr - try: - scipy.io.savemat(savedataname, tosave) - except: - print "Save error" - # Show - if doshowplot or dosaveplot: - for algotuple in 
algosN: - algoname = algotuple[1] - plt.figure() - plt.imshow(meanmatrix[algoname], cmap=cm.gray, interpolation='nearest',origin='lower') - if dosaveplot: - for ext in saveplotexts: - plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') - for algotuple in algosL: - algoname = algotuple[1] - for ilbd in np.arange(lambdas.size): - plt.figure() - plt.imshow(meanmatrix[algoname][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower') - if dosaveplot: - for ext in saveplotexts: - plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight') - if doshowplot: - plt.show() - - print "Finished." - -def run_once_tuple(t): - results = run_once(*t) - import sys - currmodule = sys.modules[__name__] - currmodule.proccount.value = currmodule.proccount.value + 1 - print "================================" - print "Finished job",currmodule.proccount.value,"of",currmodule.njobs - print "================================" - return results - -def run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): - - d = Omega.shape[1] - - nalgosN = len(algosN) - nalgosL = len(algosL) - - - xrec = dict() - err = dict() - relerr = dict() - elapsed = dict() - - # Prepare storage variables for algorithms non-Lambda - for i,algo in zip(np.arange(nalgosN),algosN): - xrec[algo[1]] = np.zeros((d, y.shape[1])) - err[algo[1]] = np.zeros(y.shape[1]) - relerr[algo[1]] = np.zeros(y.shape[1]) - elapsed[algo[1]] = 0 - # Prepare storage variables for algorithms with Lambda - for i,algo in zip(np.arange(nalgosL),algosL): - xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) - err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) - relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) - elapsed[algo[1]] = np.zeros(lambdas.size) - - # Run algorithms non-Lambda - for iy in np.arange(y.shape[1]): - for algofunc,strname in algosN: - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - try: - timestart = time.time() - xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) - elapsed[strname] = elapsed[strname] + (time.time() - timestart) - except pyCSalgos.BP.l1qec.l1qecInputValueError as e: - print "Caught exception when running algorithm",strname," :",e.message - except pyCSalgos.NESTA.NESTA.NestaError as e: - print "Caught exception when running algorithm",strname," :",e.message - err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) - relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) - for algofunc,strname in algosN: - print strname,' : avg relative error = ',np.mean(relerr[strname]) - - # Run algorithms with Lambda - for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): - for iy in np.arange(y.shape[1]): - D = np.linalg.pinv(Omega) - U,S,Vt = np.linalg.svd(D) - for algofunc,strname in algosL: - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - try: - timestart = time.time() - gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) - elapsed[strname][ilbd] = elapsed[strname][ilbd] + (time.time() - timestart) - except pyCSalgos.BP.l1qc.l1qcInputValueError as e: - print "Caught exception when running algorithm",strname," :",e.message - xrec[strname][ilbd,:,iy] = np.dot(D,gamma) - err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) - relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) - print 'Lambda = ',lbd,' :' - for algofunc,strname in algosL: - print ' ',strname,' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) - - # Prepare results - mrelerrN = dict() - for algotuple in algosN: - 
mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) - mrelerrL = dict() - for algotuple in algosL: - mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) - - return mrelerrN,mrelerrL,elapsed - - - -def generateData(d,sigma,delta,rho,numvects,SNRdb): - - # Process parameters - noiselevel = 1.0 / (10.0**(SNRdb/10.0)); - p = round(sigma*d); - m = round(delta*d); - l = round(d - rho*m); - - # Generate Omega and data based on parameters - Omega = pyCSalgos.Analysis.Generate_Analysis_Operator(d, p); - # Optionally make Omega more coherent - U,S,Vt = np.linalg.svd(Omega); - Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega! - Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) - Omega = np.dot(U , np.dot(Snew,Vt)) - - # Generate data - x0,y,M,Lambda,realnoise = pyCSalgos.Analysis.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); - - return Omega,x0,y,M,realnoise - - -def runsingleexampledebug(): - d = 50.0; - sigma = 2.0 - delta = 0.9 - rho = 0.05 - numvects = 20; # Number of vectors to generate - SNRdb = 7.; # This is norm(signal)/norm(noise), so power, not energy - lbd = 10000 - - Omega,x0,y,M,realnoise = generateData(d,sigma,delta,rho,numvects,SNRdb) - D = np.linalg.pinv(Omega) - U,S,Vt = np.linalg.svd(D) - - xrec = np.zeros((d, y.shape[1])) - err = np.zeros((y.shape[1])) - relerr = np.zeros((y.shape[1])) - - for iy in np.arange(y.shape[1]): - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - gamma = run_sl0(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) - xrec[:,iy] = np.dot(D,gamma) - err[iy] = np.linalg.norm(x0[:,iy] - xrec[:,iy]) - relerr[iy] = err[iy] / np.linalg.norm(x0[:,iy]) - - print "Finished runsingleexampledebug()" - -# Script main -if __name__ == "__main__": - #import cProfile - #cProfile.run('mainrun()', 'profile') - run(stdparams.stdtest) - #runsingleexampledebug()
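Note: generateData() in the deleted ABSapprox.py maps the phase-plane coordinates to problem sizes: p = sigma*d analysis rows, m = delta*d measurements, cosparsity l = d - rho*m, and a noise level derived from the SNR in dB. A small sketch of that mapping alone (the pyCSalgos generation calls are omitted; the function name is illustrative):

    def problem_sizes(d, sigma, delta, rho, SNRdb):
        # map (sigma, delta, rho, SNR) to analysis-recovery problem sizes
        noiselevel = 1.0 / (10.0 ** (SNRdb / 10.0))   # noise-to-signal power ratio
        p = int(round(sigma * d))                     # rows of the analysis operator Omega (p x d)
        m = int(round(delta * d))                     # number of measurements
        l = int(round(d - rho * m))                   # cosparsity (zero analysis coefficients)
        return noiselevel, p, m, l

    print(problem_sizes(50, 2.0, 0.8, 0.15, 20.0))    # (0.01, 100, 40, 44)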
--- a/scripts/ABSapproxMultiproc.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,254 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Sat Nov 05 18:08:40 2011 - -@author: Nic -""" - -import numpy as np -import scipy.io -import math -from multiprocessing import Pool -doplot = True -try: - import matplotlib.pyplot as plt -except: - doplot = False -if doplot: - import matplotlib.cm as cm -import pyCSalgos -import pyCSalgos.GAP.GAP -import pyCSalgos.SL0.SL0_approx - -# Define functions that prepare arguments for each algorithm call -def run_gap(y,M,Omega,epsilon): - gapparams = {"num_iteration" : 1000,\ - "greedy_level" : 0.9,\ - "stopping_coefficient_size" : 1e-4,\ - "l2solver" : 'pseudoinverse',\ - "noise_level": epsilon} - return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0] - -def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = np.linalg.pinv(Omega) - #U,S,Vt = np.linalg.svd(D) - aggDupper = np.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = np.concatenate((aggDupper, lbd * aggDlower)) - aggy = np.concatenate((y, np.zeros(N-n))) - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) - -def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = np.linalg.pinv(Omega) - #U,S,Vt = np.linalg.svd(D) - aggDupper = np.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = np.concatenate((aggDupper, lbd * aggDlower)) - aggy = np.concatenate((y, np.zeros(N-n))) - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) - - -# Define tuples (algorithm setup function, algorithm function, name) -gap = (run_gap, 'GAP') -sl0 = (run_sl0, 'SL0_approx') - -# Define which algorithms to run -# 1. Algorithms not depending on lambda -algosN = gap, # tuple -# 2. 
Algorithms depending on lambda (our ABS approach) -algosL = sl0, # tuple - -def mainrun(): - - nalgosN = len(algosN) - nalgosL = len(algosL) - - #Set up experiment parameters - d = 50.0; - sigma = 2.0 - #deltas = np.arange(0.05,1.,0.05) - #rhos = np.arange(0.05,1.,0.05) - deltas = np.array([0.05, 0.45, 0.95]) - rhos = np.array([0.05, 0.45, 0.95]) - #deltas = np.array([0.05]) - #rhos = np.array([0.05]) - #delta = 0.8; - #rho = 0.15; - numvects = 100; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10))) - lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000]) - - meanmatrix = dict() - for i,algo in zip(np.arange(nalgosN),algosN): - meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) - for i,algo in zip(np.arange(nalgosL),algosL): - meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) - - jobparams = [] - for idelta,delta in zip(np.arange(deltas.size),deltas): - for irho,rho in zip(np.arange(rhos.size),rhos): - - # Generate data and operator - Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) - - # Run algorithms - print "***** delta = ",delta," rho = ",rho - #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) - jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0)) - - pool = Pool(4) - jobresults = pool.map(runoncetuple,jobparams) - - idx = 0 - for idelta,delta in zip(np.arange(deltas.size),deltas): - for irho,rho in zip(np.arange(rhos.size),rhos): - mrelerrN,mrelerrL = jobresults[idx] - idx = idx+1 - for algotuple in algosN: - meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] - if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): - meanmatrix[algotuple[1]][irho,idelta] = 0 - for algotuple in algosL: - for ilbd in np.arange(lambdas.size): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] - if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 - - # # Prepare matrices to show - # showmats = dict() - # for i,algo in zip(np.arange(nalgosN),algosN): - # showmats[algo[1]] = np.zeros(rhos.size, deltas.size) - # for i,algo in zip(np.arange(nalgosL),algosL): - # showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size) - - # Save - tosave = dict() - tosave['meanmatrix'] = meanmatrix - tosave['d'] = d - tosave['sigma'] = sigma - tosave['deltas'] = deltas - tosave['rhos'] = rhos - tosave['numvects'] = numvects - tosave['SNRdb'] = SNRdb - tosave['lambdas'] = lambdas - try: - scipy.io.savemat('ABSapprox.mat',tosave) - except TypeError: - print "Oops, Type Error" - raise - # Show - if doplot: - for algotuple in algosN: - plt.figure() - plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower') - for algotuple in algosL: - for ilbd in np.arange(lambdas.size): - plt.figure() - plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower') - plt.show() - print "Finished." 
- -def genData(d,sigma,delta,rho,numvects,SNRdb): - - # Process parameters - noiselevel = 1.0 / (10.0**(SNRdb/10.0)); - p = round(sigma*d); - m = round(delta*d); - l = round(d - rho*m); - - # Generate Omega and data based on parameters - Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p); - # Optionally make Omega more coherent - U,S,Vt = np.linalg.svd(Omega); - Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega! - Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) - Omega = np.dot(U , np.dot(Snew,Vt)) - - # Generate data - x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); - - return Omega,x0,y,M,realnoise - -def runoncetuple(t): - return runonce(*t) - -def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): - - d = Omega.shape[1] - - nalgosN = len(algosN) - nalgosL = len(algosL) - - xrec = dict() - err = dict() - relerr = dict() - - # Prepare storage variables for algorithms non-Lambda - for i,algo in zip(np.arange(nalgosN),algosN): - xrec[algo[1]] = np.zeros((d, y.shape[1])) - err[algo[1]] = np.zeros(y.shape[1]) - relerr[algo[1]] = np.zeros(y.shape[1]) - # Prepare storage variables for algorithms with Lambda - for i,algo in zip(np.arange(nalgosL),algosL): - xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) - err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) - relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) - - # Run algorithms non-Lambda - for iy in np.arange(y.shape[1]): - for algofunc,strname in algosN: - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) - err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) - relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) - for algotuple in algosN: - print algotuple[1],' : avg relative error = ',np.mean(relerr[strname]) - - # Run algorithms with Lambda - for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): - for iy in np.arange(y.shape[1]): - D = np.linalg.pinv(Omega) - U,S,Vt = np.linalg.svd(D) - for algofunc,strname in algosL: - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) - xrec[strname][ilbd,:,iy] = np.dot(D,gamma) - err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) - relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) - print 'Lambda = ',lbd,' :' - for algotuple in algosL: - print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) - - # Prepare results - mrelerrN = dict() - for algotuple in algosN: - mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) - mrelerrL = dict() - for algotuple in algosL: - mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) - - return mrelerrN,mrelerrL - -# Script main -if __name__ == "__main__": - #import cProfile - #cProfile.run('mainrun()', 'profile') - - mainrun()
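Note: the deleted ABSapproxMultiproc.py builds one parameter tuple per (delta, rho) cell and then maps a single-argument wrapper over the list with multiprocessing.Pool, because Pool.map passes exactly one object to each job. A toy sketch of that pattern with a stand-in worker (the real runonce() is the one in the diff above):

    from multiprocessing import Pool

    def runonce(delta, rho, numvects):
        # stand-in for the real per-cell experiment
        return delta * rho * numvects

    def runoncetuple(args):
        # Pool.map passes a single object, so unpack the tuple here
        return runonce(*args)

    if __name__ == "__main__":
        jobparams = [(d, r, 100) for d in (0.05, 0.45, 0.95) for r in (0.05, 0.45, 0.95)]
        with Pool(4) as pool:
            results = pool.map(runoncetuple, jobparams)
        print(results)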
--- a/scripts/ABSapproxPP.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,232 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Sat Nov 05 18:08:40 2011 - -@author: Nic -""" - -import numpy -import scipy.io -import math -#import matplotlib.pyplot as plt -#import matplotlib.cm as cm -import pp -import pyCSalgos -import pyCSalgos.GAP.GAP -import pyCSalgos.SL0.SL0_approx - -# Define functions that prepare arguments for each algorithm call -def run_gap(y,M,Omega,epsilon): - gapparams = {"num_iteration" : 1000,\ - "greedy_level" : 0.9,\ - "stopping_coefficient_size" : 1e-4,\ - "l2solver" : 'pseudoinverse',\ - "noise_level": epsilon} - return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,numpy.zeros(Omega.shape[1]))[0] - -def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = numpy.linalg.pinv(Omega) - #U,S,Vt = numpy.linalg.svd(D) - aggDupper = numpy.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = numpy.concatenate((aggDupper, lbd * aggDlower)) - aggy = numpy.concatenate((y, numpy.zeros(N-n))) - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) - -# Define tuples (algorithm setup function, algorithm function, name) -gap = (run_gap, 'GAP') -sl0 = (run_sl0, 'SL0_approx') - -# Define which algorithms to run -# 1. Algorithms not depending on lambda -algosN = gap, # tuple -# 2. Algorithms depending on lambda (our ABS approach) -algosL = sl0, # tuple - -def mainrun(): - - nalgosN = len(algosN) - nalgosL = len(algosL) - - #Set up experiment parameters - d = 50; - sigma = 2.0 - #deltas = numpy.arange(0.05,0.95,0.05) - #rhos = numpy.arange(0.05,0.95,0.05) - deltas = numpy.array([0.05, 0.45, 0.95]) - rhos = numpy.array([0.05, 0.45, 0.95]) - #deltas = numpy.array([0.05]) - #rhos = numpy.array([0.05]) - #delta = 0.8; - #rho = 0.15; - numvects = 10; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.concatenate((numpy.array([0]), 10**numpy.linspace(-5, 4, 10))) - - meanmatrix = dict() - for i,algo in zip(numpy.arange(nalgosN),algosN): - meanmatrix[algo[1]] = numpy.zeros((rhos.size, deltas.size)) - for i,algo in zip(numpy.arange(nalgosL),algosL): - meanmatrix[algo[1]] = numpy.zeros((lambdas.size, rhos.size, deltas.size)) - - # PP: start job server - job_server = pp.Server(ncpus = 4) - idx = 0 - jobparams = [] - for idelta,delta in zip(numpy.arange(deltas.size),deltas): - for irho,rho in zip(numpy.arange(rhos.size),rhos): - - # Generate data and operator - Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) - - jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0)) - - idx = idx + 1 - - # Run algorithms - modules = ('numpy','pyCSalgos','pyCSalgos.GAP.GAP','pyCSalgos.SL0.SL0_approx') - depfuncs = () - jobs = [job_server.submit(runonce, jobparam, (run_gap,run_sl0), modules, depfuncs) for jobparam in jobparams] - #funcarray[idelta,irho] = job_server.submit(runonce,(algosN,algosL, Omega,y,lambdas,realnoise,M,x0), (run_gap,run_sl0)) - #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) - - # Get data from jobs - idx = 0 - for idelta,delta in zip(numpy.arange(deltas.size),deltas): - for irho,rho in zip(numpy.arange(rhos.size),rhos): - print "***** delta = ",delta," rho = ",rho - mrelerrN,mrelerrL = jobs[idx]() - for algotuple in algosN: - 
meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] - if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): - meanmatrix[algotuple[1]][irho,idelta] = 0 - for algotuple in algosL: - for ilbd in numpy.arange(lambdas.size): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] - if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 - idx = idx + 1 - - # # Prepare matrices to show - # showmats = dict() - # for i,algo in zip(numpy.arange(nalgosN),algosN): - # showmats[algo[1]] = numpy.zeros(rhos.size, deltas.size) - # for i,algo in zip(numpy.arange(nalgosL),algosL): - # showmats[algo[1]] = numpy.zeros(lambdas.size, rhos.size, deltas.size) - - # Save - tosave = dict() - tosave['meanmatrix'] = meanmatrix - tosave['d'] = d - tosave['sigma'] = sigma - tosave['deltas'] = deltas - tosave['rhos'] = rhos - tosave['numvects'] = numvects - tosave['SNRdb'] = SNRdb - tosave['lambdas'] = lambdas - try: - scipy.io.savemat('ABSapprox.mat',tosave) - except TypeError: - print "Oops, Type Error" - raise - # Show - # for algotuple in algosN: - # plt.figure() - # plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest') - # for algotuple in algosL: - # for ilbd in numpy.arange(lambdas.size): - # plt.figure() - # plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest') - # plt.show() - print "Finished." - -def genData(d,sigma,delta,rho,numvects,SNRdb): - - # Process parameters - noiselevel = 1.0 / (10.0**(SNRdb/10.0)); - p = round(sigma*d); - m = round(delta*d); - l = round(d - rho*m); - - # Generate Omega and data based on parameters - Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p); - # Optionally make Omega more coherent - U,S,Vt = numpy.linalg.svd(Omega); - Sdnew = S * (1+numpy.arange(S.size)) # Make D coherent, not Omega! 
- Snew = numpy.vstack((numpy.diag(Sdnew), numpy.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) - Omega = numpy.dot(U , numpy.dot(Snew,Vt)) - - # Generate data - x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); - - return Omega,x0,y,M,realnoise - -def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): - - d = Omega.shape[1] - - nalgosN = len(algosN) - nalgosL = len(algosL) - - xrec = dict() - err = dict() - relerr = dict() - - # Prepare storage variables for algorithms non-Lambda - for i,algo in zip(numpy.arange(nalgosN),algosN): - xrec[algo[1]] = numpy.zeros((d, y.shape[1])) - err[algo[1]] = numpy.zeros(y.shape[1]) - relerr[algo[1]] = numpy.zeros(y.shape[1]) - # Prepare storage variables for algorithms with Lambda - for i,algo in zip(numpy.arange(nalgosL),algosL): - xrec[algo[1]] = numpy.zeros((lambdas.size, d, y.shape[1])) - err[algo[1]] = numpy.zeros((lambdas.size, y.shape[1])) - relerr[algo[1]] = numpy.zeros((lambdas.size, y.shape[1])) - - # Run algorithms non-Lambda - for iy in numpy.arange(y.shape[1]): - for algofunc,strname in algosN: - epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy]) - xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) - err[strname][iy] = numpy.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) - relerr[strname][iy] = err[strname][iy] / numpy.linalg.norm(x0[:,iy]) - for algotuple in algosN: - print algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname]) - - # Run algorithms with Lambda - for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas): - for iy in numpy.arange(y.shape[1]): - D = numpy.linalg.pinv(Omega) - U,S,Vt = numpy.linalg.svd(D) - for algofunc,strname in algosL: - epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy]) - gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) - xrec[strname][ilbd,:,iy] = numpy.dot(D,gamma) - err[strname][ilbd,iy] = numpy.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) - relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / numpy.linalg.norm(x0[:,iy]) - print 'Lambda = ',lbd,' :' - for algotuple in algosL: - print ' ',algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname][ilbd,:]) - - # Prepare results - mrelerrN = dict() - for algotuple in algosN: - mrelerrN[algotuple[1]] = numpy.mean(relerr[algotuple[1]]) - mrelerrL = dict() - for algotuple in algosL: - mrelerrL[algotuple[1]] = numpy.mean(relerr[algotuple[1]],1) - - return mrelerrN,mrelerrL - -# Script main -if __name__ == "__main__": - mainrun() \ No newline at end of file
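Note: the run_sl0/run_bp wrappers in these scripts implement the ABS approximation by stacking M*D on top of the lambda-scaled nullspace rows of D (the last N-n rows of Vt from the SVD of D) and zero-padding y, then handing the augmented synthesis problem to a standard sparse solver and mapping the result back through D. A hedged sketch of just the augmentation step (the solver call is omitted; the function name is illustrative):

    import numpy as np

    def abs_augmented_system(y, M, Omega, lbd):
        # build the lambda-weighted synthesis system used by the ABS approach
        N, n = Omega.shape                     # Omega is N x n with N >= n
        D = np.linalg.pinv(Omega)              # synthesis dictionary, n x N
        U, S, Vt = np.linalg.svd(D)
        aggD = np.concatenate((M @ D, lbd * Vt[-(N - n):, :]))
        aggy = np.concatenate((y, np.zeros(N - n)))
        return aggD, aggy                      # solve for gamma, then x = D @ gamma

    # toy dimensions, for shape checking only
    Omega = np.random.randn(8, 5)
    M = np.random.randn(3, 5)
    y = np.random.randn(3)
    aggD, aggy = abs_augmented_system(y, M, Omega, lbd=1e-2)
    print(aggD.shape, aggy.shape)              # (6, 8) (6,)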
--- a/scripts/ABSapproxSingle.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,240 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Sat Nov 05 18:08:40 2011 - -@author: Nic -""" - -import numpy as np -import scipy.io -import math -doplot = True -try: - import matplotlib.pyplot as plt -except: - doplot = False -if doplot: - import matplotlib.cm as cm -import pyCSalgos -import pyCSalgos.GAP.GAP -import pyCSalgos.SL0.SL0_approx - -# Define functions that prepare arguments for each algorithm call -def run_gap(y,M,Omega,epsilon): - gapparams = {"num_iteration" : 1000,\ - "greedy_level" : 0.9,\ - "stopping_coefficient_size" : 1e-4,\ - "l2solver" : 'pseudoinverse',\ - "noise_level": epsilon} - return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0] - -def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = np.linalg.pinv(Omega) - #U,S,Vt = np.linalg.svd(D) - aggDupper = np.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = np.concatenate((aggDupper, lbd * aggDlower)) - aggy = np.concatenate((y, np.zeros(N-n))) - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) - -def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = np.linalg.pinv(Omega) - #U,S,Vt = np.linalg.svd(D) - aggDupper = np.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = np.concatenate((aggDupper, lbd * aggDlower)) - aggy = np.concatenate((y, np.zeros(N-n))) - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) - - -# Define tuples (algorithm setup function, algorithm function, name) -gap = (run_gap, 'GAP') -sl0 = (run_sl0, 'SL0_approx') - -# Define which algorithms to run -# 1. Algorithms not depending on lambda -algosN = gap, # tuple -# 2. 
Algorithms depending on lambda (our ABS approach) -algosL = sl0, # tuple - -def mainrun(): - - nalgosN = len(algosN) - nalgosL = len(algosL) - - #Set up experiment parameters - d = 50.0; - sigma = 2.0 - #deltas = np.arange(0.05,1.,0.05) - #rhos = np.arange(0.05,1.,0.05) - deltas = np.array([0.05, 0.45, 0.95]) - rhos = np.array([0.05, 0.45, 0.95]) - #deltas = np.array([0.05]) - #rhos = np.array([0.05]) - #delta = 0.8; - #rho = 0.15; - numvects = 100; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10))) - lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000]) - - meanmatrix = dict() - for i,algo in zip(np.arange(nalgosN),algosN): - meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) - for i,algo in zip(np.arange(nalgosL),algosL): - meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) - - for idelta,delta in zip(np.arange(deltas.size),deltas): - for irho,rho in zip(np.arange(rhos.size),rhos): - - # Generate data and operator - Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) - - # Run algorithms - print "***** delta = ",delta," rho = ",rho - mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) - - for algotuple in algosN: - meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] - if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): - meanmatrix[algotuple[1]][irho,idelta] = 0 - for algotuple in algosL: - for ilbd in np.arange(lambdas.size): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] - if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): - meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 - - # # Prepare matrices to show - # showmats = dict() - # for i,algo in zip(np.arange(nalgosN),algosN): - # showmats[algo[1]] = np.zeros(rhos.size, deltas.size) - # for i,algo in zip(np.arange(nalgosL),algosL): - # showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size) - - # Save - tosave = dict() - tosave['meanmatrix'] = meanmatrix - tosave['d'] = d - tosave['sigma'] = sigma - tosave['deltas'] = deltas - tosave['rhos'] = rhos - tosave['numvects'] = numvects - tosave['SNRdb'] = SNRdb - tosave['lambdas'] = lambdas - try: - scipy.io.savemat('ABSapprox.mat',tosave) - except TypeError: - print "Oops, Type Error" - raise - # Show - if doplot: - for algotuple in algosN: - plt.figure() - plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower') - for algotuple in algosL: - for ilbd in np.arange(lambdas.size): - plt.figure() - plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower') - plt.show() - print "Finished." - -def genData(d,sigma,delta,rho,numvects,SNRdb): - - # Process parameters - noiselevel = 1.0 / (10.0**(SNRdb/10.0)); - p = round(sigma*d); - m = round(delta*d); - l = round(d - rho*m); - - # Generate Omega and data based on parameters - Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p); - # Optionally make Omega more coherent - U,S,Vt = np.linalg.svd(Omega); - Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega! 
- Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) - Omega = np.dot(U , np.dot(Snew,Vt)) - - # Generate data - x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); - - return Omega,x0,y,M,realnoise - -def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): - - d = Omega.shape[1] - - nalgosN = len(algosN) - nalgosL = len(algosL) - - xrec = dict() - err = dict() - relerr = dict() - - # Prepare storage variables for algorithms non-Lambda - for i,algo in zip(np.arange(nalgosN),algosN): - xrec[algo[1]] = np.zeros((d, y.shape[1])) - err[algo[1]] = np.zeros(y.shape[1]) - relerr[algo[1]] = np.zeros(y.shape[1]) - # Prepare storage variables for algorithms with Lambda - for i,algo in zip(np.arange(nalgosL),algosL): - xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) - err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) - relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) - - # Run algorithms non-Lambda - for iy in np.arange(y.shape[1]): - for algofunc,strname in algosN: - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) - err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) - relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) - for algotuple in algosN: - print algotuple[1],' : avg relative error = ',np.mean(relerr[strname]) - - # Run algorithms with Lambda - for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): - for iy in np.arange(y.shape[1]): - D = np.linalg.pinv(Omega) - U,S,Vt = np.linalg.svd(D) - for algofunc,strname in algosL: - epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) - gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) - xrec[strname][ilbd,:,iy] = np.dot(D,gamma) - err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) - relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) - print 'Lambda = ',lbd,' :' - for algotuple in algosL: - print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) - - # Prepare results - mrelerrN = dict() - for algotuple in algosN: - mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) - mrelerrL = dict() - for algotuple in algosL: - mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) - - return mrelerrN,mrelerrL - -# Script main -if __name__ == "__main__": - - #import cProfile - #cProfile.run('mainrun()', 'profile') - mainrun() \ No newline at end of file
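Note: after each run, these drivers turn the average relative error of every cell into a phase-transition intensity, 1 - mean(relerr), clamping negative and NaN values to zero so the plotted matrices stay in [0, 1]. A minimal sketch of that conversion (illustrative function name):

    import math

    def to_intensity(mean_relerr):
        # convert mean relative error into a [0, 1] phase-transition intensity
        val = 1.0 - mean_relerr
        if val < 0 or math.isnan(val):
            val = 0.0
        return val

    print([to_intensity(v) for v in (0.02, 0.7, 1.8, float('nan'))])  # ~[0.98, 0.3, 0.0, 0.0]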
--- a/scripts/algos.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,138 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Wed Dec 07 14:06:13 2011 - -@author: ncleju -""" - -import numpy -import pyCSalgos -import pyCSalgos.GAP.GAP -import pyCSalgos.BP.l1qc -import pyCSalgos.BP.l1qec -import pyCSalgos.SL0.SL0_approx -import pyCSalgos.OMP.omp_QR -import pyCSalgos.RecomTST.RecommendedTST -import pyCSalgos.NESTA.NESTA - -#========================== -# Algorithm functions -#========================== -def run_gap(y,M,Omega,epsilon): - gapparams = {"num_iteration" : 1000,\ - "greedy_level" : 0.9,\ - "stopping_coefficient_size" : 1e-4,\ - "l2solver" : 'pseudoinverse',\ - "noise_level": epsilon} - return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,numpy.zeros(Omega.shape[1]))[0] - -def run_bp_analysis(y,M,Omega,epsilon): - - N,n = Omega.shape - D = numpy.linalg.pinv(Omega) - U,S,Vt = numpy.linalg.svd(D) - Aeps = numpy.dot(M,D) - Aexact = Vt[-(N-n):,:] - # We don't ned any aggregate matrices anymore - - x0 = numpy.zeros(N) - return numpy.dot(D , pyCSalgos.BP.l1qec.l1qec_logbarrier(x0,Aeps,Aeps.T,y,epsilon,Aexact,Aexact.T,numpy.zeros(N-n))) - -def run_sl0_analysis(y,M,Omega,epsilon): - - N,n = Omega.shape - D = numpy.linalg.pinv(Omega) - U,S,Vt = numpy.linalg.svd(D) - Aeps = numpy.dot(M,D) - Aexact = Vt[-(N-n):,:] - # We don't ned any aggregate matrices anymore - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return numpy.dot(D , pyCSalgos.SL0.SL0_approx.SL0_approx_analysis(Aeps,Aexact,y,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)) - -def run_nesta(y,M,Omega,epsilon): - - U,S,V = numpy.linalg.svd(M, full_matrices = True) - V = V.T # Make like Matlab - m,n = M.shape # Make like Matlab - S = numpy.hstack((numpy.diag(S), numpy.zeros((m,n-m)))) - - opt_muf = 1e-3 - optsUSV = {'U':U, 'S':S, 'V':V} - opts = {'U':Omega, 'Ut':Omega.T.copy(), 'USV':optsUSV, 'TolVar':1e-5, 'Verbose':0} - return pyCSalgos.NESTA.NESTA.NESTA(M, None, y, opt_muf, epsilon, opts)[0] - - -def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = numpy.linalg.pinv(Omega) - #U,S,Vt = numpy.linalg.svd(D) - aggDupper = numpy.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = numpy.concatenate((aggDupper, lbd * aggDlower)) - aggy = numpy.concatenate((y, numpy.zeros(N-n))) - - sigmamin = 0.001 - sigma_decrease_factor = 0.5 - mu_0 = 2 - L = 10 - return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) - -def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = numpy.linalg.pinv(Omega) - #U,S,Vt = numpy.linalg.svd(D) - aggDupper = numpy.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = numpy.concatenate((aggDupper, lbd * aggDlower)) - aggy = numpy.concatenate((y, numpy.zeros(N-n))) - - x0 = numpy.zeros(N) - return pyCSalgos.BP.l1qc.l1qc_logbarrier(x0,aggD,aggD.T,aggy,epsilon) - -def run_ompeps(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = numpy.linalg.pinv(Omega) - #U,S,Vt = numpy.linalg.svd(D) - aggDupper = numpy.dot(M,D) - aggDlower = Vt[-(N-n):,:] - aggD = numpy.concatenate((aggDupper, lbd * aggDlower)) - aggy = numpy.concatenate((y, numpy.zeros(N-n))) - - opts = dict() - opts['stopCrit'] = 'mse' - opts['stopTol'] = epsilon**2 / aggy.size - return pyCSalgos.OMP.omp_QR.greed_omp_qr(aggy,aggD,aggD.shape[1],opts)[0] - -def run_tst(y,M,Omega,D,U,S,Vt,epsilon,lbd): - - N,n = Omega.shape - #D = numpy.linalg.pinv(Omega) - #U,S,Vt = numpy.linalg.svd(D) - aggDupper = numpy.dot(M,D) - aggDlower 
= Vt[-(N-n):,:] - aggD = numpy.concatenate((aggDupper, lbd * aggDlower)) - aggy = numpy.concatenate((y, numpy.zeros(N-n))) - - nsweep = 300 - tol = epsilon / numpy.linalg.norm(aggy) - return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep=nsweep, tol=tol) - - -#========================== -# Define tuples (algorithm function, name) -#========================== -gap = (run_gap, 'GAP') -sl0 = (run_sl0, 'SL0a') -sl0analysis = (run_sl0_analysis, 'SL0a2') -bpanalysis = (run_bp_analysis, 'BPa2') -nesta = (run_nesta, 'NESTA') -bp = (run_bp, 'BP') -ompeps = (run_ompeps, 'OMPeps') -tst = (run_tst, 'TST') \ No newline at end of file
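Note: besides the lambda-weighted wrappers, the deleted algos.py also contains exact-constraint analysis variants (run_bp_analysis, run_sl0_analysis): the epsilon bound applies only to the measurement block M*D, the nullspace rows of D are enforced as hard equalities, and the recovered coefficients are mapped back as x = D*gamma. A hedged sketch of that split (solver calls omitted; names are illustrative):

    import numpy as np

    def analysis_split(M, Omega):
        # epsilon block vs. exact block, as in run_bp_analysis / run_sl0_analysis
        N, n = Omega.shape
        D = np.linalg.pinv(Omega)              # n x N synthesis dictionary
        U, S, Vt = np.linalg.svd(D)
        Aeps = M @ D                           # ||y - Aeps @ gamma||_2 <= epsilon
        Aexact = Vt[-(N - n):, :]              # Aexact @ gamma = 0, enforced exactly
        return D, Aeps, Aexact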
--- a/scripts/stdparams.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,287 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Wed Dec 07 14:04:40 2011 - -@author: ncleju -""" - -import numpy -from algos import * - -#========================== -# Standard parameters -#========================== -# Standard parameters for quick testing -# Algorithms: GAP, SL0 and BP -# d=50, sigma = 2, delta and rho only 3 x 3, lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# Useful for short testing -def stdtest(): - # Define which algorithms to run - algosN = nesta, # tuple of algorithms not depending on lambda - #algosL = sl0,bp # tuple of algorithms depending on lambda (our ABS approach) - algosL = sl0, - - d = 50.0 - sigma = 2.0 - deltas = numpy.array([0.05, 0.45, 0.95]) - rhos = numpy.array([0.05, 0.45, 0.95]) - #deltas = numpy.array([0.95]) - #deltas = numpy.arange(0.05,1.,0.05) - #rhos = numpy.array([0.05]) - numvects = 10; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_stdtest.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_stdtest_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - - -# Standard parameters 1 -# All algorithms, 100 vectors -# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -def std1(): - # Define which algorithms to run - algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda - algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) - - d = 50.0; - sigma = 2.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std1.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std1_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - - -# Standard parameters 2 -# All algorithms, 100 vectors -# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -def std2(): - # Define which algorithms to run - algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda - algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) - - d = 20.0 - sigma = 10.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std2.mat' - doshowplot = False - dosaveplot = True - 
saveplotbase = 'approx_pt_std2_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - - - # Standard parameters 3 -# All algorithms, 100 vectors -# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# IDENTICAL with 1 but with 10dB SNR noise -def std3(): - # Define which algorithms to run - algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda - algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) - - d = 50.0; - sigma = 2.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std3.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std3_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - -# Standard parameters 4 -# All algorithms, 100 vectors -# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# Identical to 2 but with 10dB SNR noise -def std4(): - # Define which algorithms to run - algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda - algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) - - d = 20.0 - sigma = 10.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std4.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std4_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - -# Standard parameters 1nesta -# Only NESTA, 100 vectors -# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# Identical to std1 but with only NESTA -def std1nesta(): - # Define which algorithms to run - algosN = nesta, # tuple of algorithms not depending on lambda - algosL = () # tuple of algorithms depending on lambda (our ABS approach) - - d = 50.0; - sigma = 2.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std1nesta.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std1nesta_' - saveplotexts = ('png','pdf','eps') - - return 
algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - -# Standard parameters 2nesta -# Only NESTA, 100 vectors -# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# Identical with std2, but with only NESTA -def std2nesta(): - # Define which algorithms to run - algosN = nesta, # tuple of algorithms not depending on lambda - algosL = () # tuple of algorithms depending on lambda (our ABS approach) - - d = 20.0 - sigma = 10.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std2nesta.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std2nesta_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - - # Standard parameters 3nesta -# Only NESTA, 100 vectors -# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# IDENTICAL with 3 but with only NESTA -def std3nesta(): - # Define which algorithms to run - algosN = nesta, # tuple of algorithms not depending on lambda - algosL = () # tuple of algorithms depending on lambda (our ABS approach) - - d = 50.0; - sigma = 2.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std3nesta.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std3nesta_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts - -# Standard parameters 4nesta -# Only NESTA, 100 vectors -# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 -# Do save data, do save plots, don't show plots -# Identical to 4 but with only NESTA -def std4nesta(): - # Define which algorithms to run - algosN = nesta, # tuple of algorithms not depending on lambda - algosL = () # tuple of algorithms depending on lambda (our ABS approach) - - d = 20.0 - sigma = 10.0 - deltas = numpy.arange(0.05,1.,0.05) - rhos = numpy.arange(0.05,1.,0.05) - numvects = 100; # Number of vectors to generate - SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy - # Values for lambda - #lambdas = [0 10.^linspace(-5, 4, 10)]; - lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) - - dosavedata = True - savedataname = 'approx_pt_std4nesta.mat' - doshowplot = False - dosaveplot = True - saveplotbase = 'approx_pt_std4nesta_' - saveplotexts = ('png','pdf','eps') - - return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ - doshowplot,dosaveplot,saveplotbase,saveplotexts \ No newline at end of file
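Note: every std*() function in the deleted stdparams.py returns the same 15-element settings tuple, which the ABSapprox driver unpacks positionally in run() and run_mp(). A hedged usage sketch of that contract; after this changeset the scripts/ modules no longer exist, so treat the imports as historical illustrations only:

    # historical usage of the deleted modules (imports will not resolve after this changeset)
    import stdparams
    import ABSapprox

    ABSapprox.run(stdparams.stdtest)           # quick single-threaded test configuration
    ABSapprox.run_mp(stdparams.std2, ncpus=4)  # full std2 sweep on worker processes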
--- a/scripts/study_analysis_rec_algos_noisy.m Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,417 +0,0 @@ -% File: study_analysis_rec_algos -% Run global experiment to compare algorithms used for analysis-based reconstruction -% and plot phast transition graphs - -clear all -close all - -% ================================= -% Set up experiment parameters -%================================== -% Which form factor, delta and rho we want -sigmas = 1.2; -deltas = 0.05:0.05:0.95; -rhos = 0.05:0.05:0.95; -% deltas = [0.95]; -% rhos = [0.1]; -%deltas = 0.3:0.3:0.9; -%rhos = 0.3:0.3:0.9; - -% Number of vectors to generate each time -numvects = 100; - -% Add noise -% This is norm(signal)/norm(noise), so power, not energy -SNRdb = 20; % virtually no noise - -% Show progressbar ? (not recommended when running on parallel threads) -do_progressbar = 0; - -% Value of lambda -lambda = 1e-2; - -% What algos to run -do_abs_ompk = 1; -do_abs_ompeps = 1; -do_abs_tst = 1; -do_abs_bp = 1; -do_gap = 1; -do_nesta = 0; - -% ================================= -% Processing the parameters -%================================== -% Set up parameter structure -count = 0; -for isigma = 1:sigmas - for idelta = 1:numel(deltas) - for irho = 1:numel(rhos) - sigma = sigmas(isigma); - delta = deltas(idelta); - rho = rhos(irho); - - d = 200; - p = round(sigma*d); - m = round(delta*d); - l = round(d - rho*m); - - params(count+1).d = d; - params(count+1).p = p; - params(count+1).m = m; - params(count+1).l = l; - - count = count + 1; - end - end -end - -% Compute noiselevel from db -noiselevel = 1 / (10^(SNRdb/10)); - -%load study_analysis_init - -% Generate an analysis operator Omega -Omega = Generate_Analysis_Operator(d, p); - -% Progressbar -if do_progressbar - progressbar('Total', 'Current parameters'); -end - -% Init times -elapsed_abs_ompk = 0; -elapsed_abs_ompeps = 0; -elapsed_abs_tst = 0; -elapsed_abs_bp = 0; -elapsed_gap = 0; -elapsed_nesta = 0; - -% Init results structure -results = []; - -% Prepare progressbar reduction variables -% if do_progressbar -% incr2 = 1/numvects; -% incr1 = 1/numvects/count; -% frac2 = 0; -% frac1 = 0; -% end - -% ======== -% Run -% ======== -parfor iparam = 1:numel(params) - - % Read current parameters - d = params(iparam).d; - p = params(iparam).p; - m = params(iparam).m; - l = params(iparam).l; - - % Init stuff - xrec_abs_ompk = zeros(d, numvects); - xrec_abs_ompeps = zeros(d, numvects); - xrec_abs_tst = zeros(d, numvects); - xrec_abs_bp = zeros(d, numvects); - xrec_gap = zeros(d, numvects); - xrec_nesta = zeros(d, numvects); - % - err_abs_ompk = zeros(numvects,1); - relerr_abs_ompk = zeros(numvects,1); - err_abs_ompeps = zeros(numvects,1); - relerr_abs_ompeps = zeros(numvects,1); - err_abs_tst = zeros(numvects,1); - relerr_abs_tst = zeros(numvects,1); - err_abs_bp = zeros(numvects,1); - relerr_abs_bp = zeros(numvects,1); - err_gap = zeros(numvects,1); - relerr_gap = zeros(numvects,1); - err_nesta = zeros(numvects,1); - relerr_nesta = zeros(numvects,1); - - % Generate data based on parameters - [x0,y,M,Lambda] = Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); - - % For every generated signal do - for iy = 1:size(y,2) - - % Compute epsilon - epsilon = noiselevel * norm(y(:,iy)); - - %-------------------------------- - % Reconstruct (and measure delay) - % Compute reconstruction error - %-------------------------------- - % ABS-OMPk - if (do_abs_ompk) - timer_abs_ompk = tic; - xrec_abs_ompk(:,iy) = ABS_OMPk_approx(y(:,iy), Omega, M, p-l, 
lambda); - elapsed_abs_ompk = elapsed_abs_ompk + toc(timer_abs_ompk); - % - err_abs_ompk(iy) = norm(x0(:,iy) - xrec_abs_ompk(:,iy)); - relerr_abs_ompk(iy) = err_abs_ompk(iy) / norm(x0(:,iy)); - end - % ABS-OMPeps - if (do_abs_ompeps) - timer_abs_ompeps = tic; - xrec_abs_ompeps(:,iy) = ABS_OMPeps_approx(y(:,iy), Omega, M, epsilon, lambda); - elapsed_abs_ompeps = elapsed_abs_ompeps + toc(timer_abs_ompeps); - % - err_abs_ompeps(iy) = norm(x0(:,iy) - xrec_abs_ompeps(:,iy)); - relerr_abs_ompeps(iy) = err_abs_ompeps(iy) / norm(x0(:,iy)); - end - % ABS-TST - if (do_abs_tst) - timer_abs_tst = tic; - xrec_abs_tst(:,iy) = ABS_TST_approx(y(:,iy), Omega, M, epsilon, lambda); - elapsed_abs_tst = elapsed_abs_tst + toc(timer_abs_tst); - % - err_abs_tst(iy) = norm(x0(:,iy) - xrec_abs_tst(:,iy)); - relerr_abs_tst(iy) = err_abs_tst(iy) / norm(x0(:,iy)); - end - % ABS-BP - if (do_abs_bp) - timer_abs_bp = tic; - xrec_abs_bp(:,iy) = ABS_BP_approx(y(:,iy), Omega, M, epsilon, lambda); - elapsed_abs_bp = elapsed_abs_bp + toc(timer_abs_bp); - % - err_abs_bp(iy) = norm(x0(:,iy) - xrec_abs_bp(:,iy)); - relerr_abs_bp(iy) = err_abs_bp(iy) / norm(x0(:,iy)); - end - % GAP - if (do_gap) - gapparams = []; - gapparams.num_iteration = 40; - gapparams.greedy_level = 0.9; - gapparams.stopping_coefficient_size = 1e-4; - gapparams.l2solver = 'pseudoinverse'; - gapparams.noise_level = noiselevel; - timer_gap = tic; - xrec_gap(:,iy) = GAP(y(:,iy), M, M', Omega, Omega', gapparams, zeros(d,1)); - elapsed_gap = elapsed_gap + toc(timer_gap); - % - err_gap(iy) = norm(x0(:,iy) - xrec_gap(:,iy)); - relerr_gap(iy) = err_gap(iy) / norm(x0(:,iy)); - end - % NESTA - if (do_nesta) - try - timer_nesta = tic; - xrec_nesta(:,iy) = do_nesta_DemoNonProjector(x0(:,iy), M, Omega', 0); - elapsed_nesta = elapsed_nesta + toc(timer_nesta); - catch err - disp('*****ERROR: NESTA throwed error *****'); - xrec_nesta(:,iy) = zeros(size(x0(:,iy))); - end - % - err_nesta(iy) = norm(x0(:,iy) - xrec_nesta(:,iy)); - relerr_nesta(iy) = err_nesta(iy) / norm(x0(:,iy)); - end - - % Update progressbar -% if do_progressbar -% %frac2 = iy/numvects; -% %frac1 = ((iparam-1) + frac2) / count; -% if norm(frac2 - 1) < 1e-6 -% frac2 = 0; -% end -% frac2 = frac2 + incr2; -% frac1 = frac1 + incr1; -% progressbar(frac1, frac2); -% end - end - - %-------------------------------- - % Save results in big stucture & display - %-------------------------------- - % Save reconstructed signals - % Save rel & abs errors - % Display error - disp(['Simulation no. 
' num2str(iparam)]); - if (do_abs_ompk) - results(iparam).xrec_abs_ompk = xrec_abs_ompk; - results(iparam).err_abs_ompk = err_abs_ompk; - results(iparam).relerr_abs_ompk = relerr_abs_ompk; - disp([' ABS_OMPk: avg relative error = ' num2str(mean(relerr_abs_ompk))]); - end - if (do_abs_ompeps) - results(iparam).xrec_abs_ompeps = xrec_abs_ompeps; - results(iparam).err_abs_ompeps = err_abs_ompeps; - results(iparam).relerr_abs_ompeps = relerr_abs_ompeps; - disp([' ABS_OMPeps: avg relative error = ' num2str(mean(relerr_abs_ompeps))]); - end - if (do_abs_tst) - results(iparam).xrec_abs_tst = xrec_abs_tst; - results(iparam).err_abs_tst = err_abs_tst; - results(iparam).relerr_abs_tst = relerr_abs_tst; - disp([' ABS_TST: avg relative error = ' num2str(mean(relerr_abs_tst))]); - end - if (do_abs_bp) - results(iparam).xrec_abs_bp = xrec_abs_bp; - results(iparam).err_abs_bp = err_abs_bp; - results(iparam).relerr_abs_bp = relerr_abs_bp; - disp([' ABS_BP: avg relative error = ' num2str(mean(relerr_abs_bp))]); - end - if (do_gap) - results(iparam).xrec_gap = xrec_gap; - results(iparam).err_gap = err_gap; - results(iparam).relerr_gap = relerr_gap; - disp([' GAP: avg relative error = ' num2str(mean(relerr_gap))]); - end - if (do_nesta) - results(iparam).xrec_nesta = xrec_nesta; - results(iparam).err_nesta = err_nesta; - results(iparam).relerr_nesta = relerr_nesta; - disp([' NESTA: avg relative error = ' num2str(mean(relerr_nesta))]); - end -end - -% ================================= -% Save -% ================================= -save mat/avgerr_SNR20_lbd1e-2 - -% ================================= -% Plot phase transition -% ================================= -%-------------------------------- -% Prepare -%-------------------------------- -%d = 200; -%m = 190; -%exactthr = d/m * noiselevel; -%sigma = 1.2; -iparam = 1; -for idelta = 1:numel(deltas) - for irho = 1:numel(rhos) - % Create exact recovery count matrix -% nexact_abs_bp (irho, idelta) = sum(results(iparam).relerr_abs_bp < exactthr); -% nexact_abs_ompk (irho, idelta) = sum(results(iparam).relerr_abs_ompk < exactthr); -% nexact_abs_ompeps (irho, idelta) = sum(results(iparam).relerr_abs_ompeps < exactthr); -% nexact_gap (irho, idelta) = sum(results(iparam).relerr_gap < exactthr); -% nexact_abs_tst (irho, idelta) = sum(results(iparam).relerr_abs_tst < exactthr); -% % nexact_nesta(irho, idelta) = sum(results(iparam).relerr_nesta < exactthr); - - % Get histogram (for a single param set only!) 
-% hist_abs_ompk = hist(results(iparam).relerr_abs_ompk, 0.001:0.001:0.1); -% hist_abs_ompeps = hist(results(iparam).relerr_abs_ompeps, 0.001:0.001:0.1); -% hist_abs_tst = hist(results(iparam).relerr_abs_tst, 0.001:0.001:0.1); -% hist_abs_bp = hist(results(iparam).relerr_abs_bp, 0.001:0.001:0.1); -% hist_gap = hist(results(iparam).relerr_gap, 0.001:0.001:0.1); - - % Compute average error value - if (do_abs_ompk) - avgerr_abs_ompk(irho, idelta) = 1 - mean(results(iparam).relerr_abs_ompk); - avgerr_abs_ompk(avgerr_abs_ompk < 0) = 0; - end - if (do_abs_ompeps) - avgerr_abs_ompeps(irho, idelta) = 1 - mean(results(iparam).relerr_abs_ompeps); - avgerr_abs_ompeps(avgerr_abs_ompeps < 0) = 0; - end - if (do_abs_tst) - avgerr_abs_tst(irho, idelta) = 1 - mean(results(iparam).relerr_abs_tst); - avgerr_abs_tst(avgerr_abs_tst < 0) = 0; - end - if (do_abs_bp) - avgerr_abs_bp(irho, idelta) = 1 - mean(results(iparam).relerr_abs_bp); - avgerr_abs_bp(avgerr_abs_bp < 0) = 0; - end - if (do_gap) - avgerr_gap(irho, idelta) = 1 - mean(results(iparam).relerr_gap); - avgerr_gap(avgerr_gap < 0) = 0; - end - if (do_nesta) - avgerr_nesta(irho, idelta) = 1 - mean(results(iparam).relerr_nesta); - avgerr_nesta(avgerr_nesta < 0) = 0; - end - - iparam = iparam + 1; - end -end - -%-------------------------------- -% Plot -%-------------------------------- -show_phasetrans = @show_phasetrans_win; -iptsetpref('ImshowAxesVisible', 'on'); -close all -figbase = 'figs/avgerr_SNR20_lbd1e-2_'; -do_save = 1; -% -if (do_abs_ompk) - figure; - %h = show_phasetrans(nexact_abs_ompk, numvects); - %bar(0.001:0.001:0.1, hist_abs_ompk); - h = show_phasetrans(avgerr_abs_ompk, 1); - if do_save - figname = [figbase 'ABS_OMPk']; - saveas(h, [figname '.fig']); - saveas(h, [figname '.png']); - saveTightFigure(h, [figname '.pdf']); - end -end -% -if (do_abs_ompeps) - figure; - %h = show_phasetrans(nexact_abs_ompeps, numvects); - %bar(0.001:0.001:0.1, hist_abs_ompeps); - h = show_phasetrans(avgerr_abs_ompeps, 1); - if do_save - figname = [figbase 'ABS_OMPeps']; - saveas(h, [figname '.fig']); - saveas(h, [figname '.png']); - saveTightFigure(h, [figname '.pdf']); - end -end -% -if (do_abs_tst) - figure; - %h = show_phasetrans(nexact_abs_tst, numvects); - %bar(0.001:0.001:0.1, hist_abs_tst); - h = show_phasetrans(avgerr_abs_tst, 1); - if do_save - figname = [figbase 'ABS_TST']; - saveas(h, [figname '.fig']); - saveas(h, [figname '.png']); - saveTightFigure(h, [figname '.pdf']); - end -end -% -if (do_abs_bp) - figure; - %h = show_phasetrans(nexact_abs_bp, numvects); - %bar(0.001:0.001:0.1, hist_abs_bp); - h = show_phasetrans(avgerr_abs_bp, 1); - if do_save - figname = [figbase 'ABS_BP']; - saveas(h, [figname '.fig']); - saveas(h, [figname '.png']); - saveTightFigure(h, [figname '.pdf']); - end -end -% -if (do_gap) - figure; - %h = show_phasetrans(nexact_gap, numvects); - %bar(0.001:0.001:0.1, hist_gap); - h = show_phasetrans(avgerr_gap, 1); - if do_save - figname = [figbase 'GAP']; - saveas(h, [figname '.fig']); - saveas(h, [figname '.png']); - saveTightFigure(h, [figname '.pdf']); - end -end -% -if (do_nesta) - figure; - %h = show_phasetrans(nexact_nesta, numvects); - %bar(0.001:0.001:0.1, hist_nesta); - h = show_phasetrans(avgerr_nesta, 1); - if do_save - figname = [figbase 'NESTA']; - saveas(h, [figname '.fig']); - saveas(h, [figname '.png']); - saveTightFigure(h, [figname '.pdf']); - end -end \ No newline at end of file
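Every phase point `(sigma, delta, rho)` in the script above is mapped to concrete problem sizes through `p = round(sigma*d)`, `m = round(delta*d)`, `l = round(d - rho*m)`, and the SNR in dB is converted into the noise level used for `epsilon`. The following self-contained Python sketch reproduces only this bookkeeping; the formulas are taken from the script, the function names are illustrative, and no reconstruction algorithm is called.

```python
# Mirrors the parameter bookkeeping at the top of study_analysis_rec_algos_noisy.m.
# Only the arithmetic is reproduced; the solvers themselves are not called.
import numpy as np

def phase_point_sizes(d, sigma, delta, rho):
    """Return (p, m, l): analysis-operator rows, measurements, co-sparsity."""
    p = int(round(sigma * d))    # p = round(sigma*d)
    m = int(round(delta * d))    # m = round(delta*d)
    l = int(round(d - rho * m))  # l = round(d - rho*m)
    return p, m, l

def noise_level_from_snr_db(snr_db):
    """noiselevel = 1 / 10^(SNRdb/10), exactly as in the script."""
    return 1.0 / (10.0 ** (snr_db / 10.0))

if __name__ == '__main__':
    d, sigma = 200, 1.2
    grid = [(delta, rho, phase_point_sizes(d, sigma, delta, rho))
            for delta in np.arange(0.05, 1.0, 0.05)
            for rho in np.arange(0.05, 1.0, 0.05)]
    print(len(grid))                    # 19 x 19 = 361 parameter sets
    print(noise_level_from_snr_db(20))  # 0.01 for SNR = 20 dB
```

The quantity plotted per phase cell is then `1 - mean(relerr)`, clipped at zero, which is what the `avgerr_*` matrices above hold.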
--- a/scripts/study_analysis_rec_approx.m Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,363 +0,0 @@ -% File: study_analysis_rec_algos -% Run experiment to prove that our approx. ABS approach really converges to -% the anaysis solution as lambda -> infty -% For this, we need to generate signals that, provably, yield bad results -% with synthesis recovery, and good results with analysis recovery -% To do this we choose, N >> d, small l, and m close to d - -clear all -close all - -% ================================= -% Set up experiment parameters -%================================== -% Which form factor, delta and rho we want -sigma = 2; -%delta = 0.995; -%rho = 0.7; -%delta = 0.8; -%rho = 0.15; -delta = 0.5; -rho = 0.05; - -% Number of vectors to generate each time -numvects = 10; - -% Add noise -% This is norm(signal)/norm(noise), so power, not energy -SNRdb = 20; -%epsextrafactor = 2 - -% ================================= -% Processing the parameters -%================================== - -% Compute noiselevel from db -noiselevel = 1 / (10^(SNRdb/10)); - -% Set up parameter structure -% and generate data X as well -d = 50; -p = round(sigma*d); -m = round(delta*d); -l = round(d - rho*m); - -% Generate Omega and data based on parameters -Omega = Generate_Analysis_Operator(d, p); -%Make Omega more coherent -[U, S, V] = svd(Omega); -%Sdnew = diag(S) ./ (1:numel(diag(S)))'; -Sdnew = diag(S) .* (1:numel(diag(S)))'; % Make D coherent, not Omega! -Snew = [diag(Sdnew); zeros(size(S,1) - size(S,2), size(S,2))]; -Omega = U * Snew * V'; -[x0,y,M,Lambda, realnoise] = Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); - -datafilename = 'mat/data_SNR20'; -save(datafilename); -%load mat/data_SNR20 - -% Values of lambda -%lambdas = sqrt([1e-10 1e-8 1e-6 1e-4 1e-2 1 100 1e4 1e6 1e8 1e10]); -lambdas = [0 10.^linspace(-5, 4, 10)]; -%lambdas = 1000 -%lambdas = sqrt([0 1 10000]); - -% Algorithm identifiers -algonone = []; -ompk = 1; -ompeps = 2; -tst = 3; -bp = 4; -gap = 5; -nesta = 6; -sl0 = 7; -yall1 = 8; -spgl1 = 9; -nestasynth = 10; -numallalgos = 10; -algoname{ompk} = 'ABS OMPk'; -algoname{ompeps} = 'ABS OMPeps'; -algoname{tst} = 'ABS TST'; -algoname{bp} = 'ABS BP'; -algoname{gap} = 'GAP'; -algoname{nesta} = 'NESTA Analysis'; -algoname{sl0} = 'ABS SL0'; -algoname{yall1} = 'ABS YALL1'; -algoname{spgl1} = 'ABS SPGL1'; -algoname{nestasynth} = 'NESTA Synth'; - -% What algos to run -%algos = [gap, ompk, ompeps, tst, bp, sl0, yall1]; -%algos = [gap, ompk, ompeps, tst, bp]; -algos = [gap, sl0]; -numalgos = numel(algos); - -% Save mat file? -do_save_mat = 0; -matfilename = 'mat/approx_d50_sigma2_SNR20db_Dcoh'; - -% Save figures? -do_save_figs = 0; -figfilename = 'figs/approx_d50_sigma2_SNR20db_Dcoh'; - -% Show progressbar ? 
(not recommended when running on parallel threads) -do_progressbar = 0; -if do_progressbar - progressbar('Total', 'Current parameters'); -end - -% Init times -for i = 1:numalgos - elapsed(algos(i)) = 0; -end - -% Init results structure -results = []; - - -% ======== -% Run -% ======== - -% Run GAP and NESTA first -if any(algos == gap) - for iy = 1:size(y,2) - a = gap; - % Compute epsilon - %epsilon = epsextrafactor * noiselevel * norm(y(:,iy)); - epsilon = 1.1 * norm(realnoise(:,iy)); - % - gapparams = []; - gapparams.num_iteration = 1000; - gapparams.greedy_level = 0.9; - gapparams.stopping_coefficient_size = 1e-4; - gapparams.l2solver = 'pseudoinverse'; - %gapparams.noise_level = noiselevel; - gapparams.noise_level = epsilon; - timer(a) = tic; - xrec{a}(:,iy) = GAP(y(:,iy), M, M', Omega, Omega', gapparams, zeros(d,1)); - elapsed(a) = elapsed(a) + toc(timer(a)); - % - err{a}(iy) = norm(x0(:,iy) - xrec{a}(:,iy)); - relerr{a}(iy) = err{a}(iy) / norm(x0(:,iy)); - end - disp([ algoname{a} ': avg relative error = ' num2str(mean(relerr{a}))]); -end -if any(algos == nesta) - for iy = 1:size(y,2) - a = nesta; - % Compute epsilon - %epsilon = epsextrafactor * noiselevel * norm(y(:,iy)); - epsilon = 1.1 * norm(realnoise(:,iy)); - % - try - timer(a) = tic; - %xrec{a}(:,iy) = do_nesta_DemoNonProjector(x0(:,iy), M, Omega', 0); - - % Common heuristic: delta = sqrt(m + 2*sqrt(2*m))*sigma ????????? - [U,S,V] = svd(M,'econ'); - opts.U = Omega; - opts.Ut = Omega'; - opts.USV.U=U; - opts.USV.S=S; - opts.USV.V=V; - opts.TolVar = 1e-5; - opts.Verbose = 0; - xrec{a}(:,iy) = NESTA(M, [], y(:,iy), 1e-3, epsilon, opts); - elapsed(a) = elapsed(a) + toc(timer(a)); - catch err - disp('*****ERROR: NESTA throwed error *****'); - xrec{a}(:,iy) = zeros(size(x0(:,iy))); - end - % - err{a}(iy) = norm(x0(:,iy) - xrec{a}(:,iy)); - relerr{a}(iy) = err{a}(iy) / norm(x0(:,iy)); - end - disp([ algoname{a} ': avg relative error = ' num2str(mean(relerr{a}))]); -end - -for i = 1:numel(algos) - if algos(i) == gap - continue; - end - prevgamma{algos(i)} = zeros(p, numvects); -end - -% Run ABS algorithms (lambda-dependent) -for iparam = 1:numel(lambdas) - - % Read current parameters - lambda = lambdas(iparam); - - % Init stuff - for i = 1:numel(algos) - if algos(i) == gap || algos(i) == nesta - continue; - end - xrec{algos(i)} = zeros(d, numvects); - end - % - for i = 1:numel(algos) - if algos(i) == gap || algos(i) == nesta - continue; - end - err{algos(i)} = zeros(numvects,1); - relerr{algos(i)} = zeros(numvects,1); - end - - % For every generated signal do - for iy = 1:size(y,2) - - % Compute epsilon - %epsilon = epsextrafactor * noiselevel * norm(y(:,iy)); - epsilon = 1.1 * norm(realnoise(:,iy)); - - %-------------------------------- - % Reconstruct (and measure delay), Compute reconstruction error - %-------------------------------- - for i = 1:numel(algos) - a = algos(i); - if a == gap || a == nesta - continue - end - if a == ompk - timer(a) = tic; - xrec{a}(:,iy) = ABS_OMPk_approx(y(:,iy), Omega, M, p-l, lambda); - elapsed(a) = elapsed(a) + toc(timer(a)); - elseif a == ompeps - timer(a) = tic; - xrec{a}(:,iy) = ABS_OMPeps_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * x0(:,iy)); - elapsed(a) = elapsed(a) + toc(timer(a)); - elseif a == tst - timer(a) = tic; - xrec{a}(:,iy) = ABS_TST_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * x0(:,iy)); - elapsed(a) = elapsed(a) + toc(timer(a)); - elseif a == bp - timer(a) = tic; - [xrec{a}(:,iy), prevgamma{a}(:,iy)] = ABS_BP_approx(y(:,iy), Omega, M, epsilon, lambda, 
prevgamma{a}(:,iy), Omega * x0(:,iy)); - elapsed(a) = elapsed(a) + toc(timer(a)); - elseif a == yall1 - timer(a) = tic; - xrec{a}(:,iy) = ABS_YALL1_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * x0(:,iy)); - elapsed(a) = elapsed(a) + toc(timer(a)); -% elseif a == gap -% gapparams = []; -% gapparams.num_iteration = 40; -% gapparams.greedy_level = 0.9; -% gapparams.stopping_coefficient_size = 1e-4; -% gapparams.l2solver = 'pseudoinverse'; -% %gapparams.noise_level = noiselevel; -% gapparams.noise_level = epsilon; -% timer(a) = tic; -% xrec{a}(:,iy) = GAP(y(:,iy), M, M', Omega, Omega', gapparams, zeros(d,1)); -% elapsed(a) = elapsed(a) + toc(timer(a)); -% elseif a == nesta -% try -% timer(a) = tic; -% %xrec{a}(:,iy) = do_nesta_DemoNonProjector(x0(:,iy), M, Omega', 0); -% -% % Common heuristic: delta = sqrt(m + 2*sqrt(2*m))*sigma ????????? -% [U,S,V] = svd(M,'econ'); -% opts.U = Omega; -% opts.Ut = Omega'; -% opts.USV.U=U; -% opts.USV.S=S; -% opts.USV.V=V; -% opts.TolVar = 1e-5; -% opts.Verbose = 0; -% xrec{a}(:,iy) = NESTA(M, [], y(:,iy), 1e-3, epsilon, opts); -% elapsed(a) = elapsed(a) + toc(timer(a)); -% catch err -% disp('*****ERROR: NESTA throwed error *****'); -% xrec{a}(:,iy) = zeros(size(x0(:,iy))); -% end - elseif a == sl0 - timer(a) = tic; - xrec{a}(:,iy) = ABS_SL0_approx(y(:,iy), Omega, M, epsilon, lambda); - elapsed(a) = elapsed(a) + toc(timer(a)); - elseif a == spgl1 - timer(a) = tic; - xrec{a}(:,iy) = ABS_SPGL1_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * xrec{bp}(:,iy)); - elapsed(a) = elapsed(a) + toc(timer(a)); - elseif a == nestasynth - timer(a) = tic; - xrec{a}(:,iy) = ABS_NESTAsynth_approx(y(:,iy), Omega, M, epsilon, lambda, Omega * xrec{bp}(:,iy)); - elapsed(a) = elapsed(a) + toc(timer(a)); - else - %error('No such algorithm!'); - end - % - % Compare to GAP instead! - %x0(:,iy) = xrec{gap}(:,iy); - % - err{a}(iy) = norm(x0(:,iy) - xrec{a}(:,iy)); - relerr{a}(iy) = err{a}(iy) / norm(x0(:,iy)); - end - - % Update progressbar -% if do_progressbar -% %frac2 = iy/numvects; -% %frac1 = ((iparam-1) + frac2) / count; -% if norm(frac2 - 1) < 1e-6 -% frac2 = 0; -% end -% frac2 = frac2 + incr2; -% frac1 = frac1 + incr1; -% progressbar(frac1, frac2); -% end - end - - %-------------------------------- - % Save results in big stucture & display - %-------------------------------- - % Save reconstructed signals - % Save rel & abs errors - % Display error - results(iparam).xrec = xrec; - results(iparam).err = err; - results(iparam).relerr = relerr; - % - %disp(['Simulation no. ' num2str(iparam)]); - disp(['Lambda = ' num2str(lambda) ':']); - for i = 1:numalgos - a = algos(i); - if a == gap || a == nesta - continue - end - disp([ algoname{a} ': avg relative error = ' num2str(mean(relerr{a}))]); - end -end - -% ================================= -% Save -% ================================= -if do_save_mat - save(matfilename); - disp(['Saved to ' matfilename]); -end - -% ================================= -% Plot -% ================================= -toplot = zeros(numel(lambdas), numalgos); - -relerrs = {results.relerr}; -for i = 1:numalgos - for j = 1:numel(lambdas) - toplot(j,i) = mean(relerrs{j}{algos(i)}); - end -end - -%h = plot(toplot); -h = semilogx(lambdas, toplot); -legend(algoname{algos}) -xlabel('Lambda') -ylabel('Average reconstruction error') -title('Reconstruction error with different algorithms') - -if (do_save_figs) - saveas(gcf, [figfilename '.fig']); - saveas(gcf, [figfilename '.png']); - saveTightFigure(gcf, [figfilename '.pdf']); -end -
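The script above sweeps `lambdas = [0 10.^linspace(-5, 4, 10)]` and finally plots, for each ABS algorithm, the mean relative error against lambda on a logarithmic axis, with the lambda-independent GAP/NESTA results as baselines. Below is a Python/matplotlib sketch of that aggregation and plot; the error curves are synthetic placeholders (not results from the experiment) and serve only to make the example runnable, while the variable names follow the script.

```python
# Sketch of the final plot in study_analysis_rec_approx.m: mean relative error
# per lambda on a log lambda axis. The curves are PLACEHOLDER data, not real
# reconstruction results; only the aggregation/plotting structure matches.
import numpy as np
import matplotlib.pyplot as plt

lambdas = np.concatenate(([0.0], 10.0 ** np.linspace(-5, 4, 10)))  # [0 10.^linspace(-5,4,10)]
rng = np.random.default_rng(0)

gap_errs = np.abs(0.05 + 0.01 * rng.standard_normal(10))           # lambda-independent baseline
relerr = {
    'GAP':     np.tile(gap_errs, (lambdas.size, 1)),
    'ABS SL0': np.abs(0.05 + 0.5 / (1.0 + lambdas[:, None]) +
                      0.01 * rng.standard_normal((lambdas.size, 10))),
}

for name, errs in relerr.items():
    plt.semilogx(lambdas, errs.mean(axis=1), label=name)  # the lambda = 0 point is dropped by the log axis
plt.xlabel('Lambda')
plt.ylabel('Average relative reconstruction error')
plt.title('Reconstruction error vs lambda (placeholder data)')
plt.legend()
plt.show()
```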
--- a/scripts/utils.py Wed Dec 14 14:48:06 2011 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,229 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Wed Nov 09 12:28:55 2011 - -@author: ncleju -""" - -import numpy -import scipy.io -import matplotlib.pyplot as plt -import matplotlib.cm as cm -import matplotlib.colors as mcolors - -# Sample call -#utils.loadshowmatrices_multipixels('H:\\CS\\Python\\Results\\pt_std1\\approx_pt_std1.mat', dosave=True, saveplotbase='approx_pt_std1_',saveplotexts=('png','eps','pdf')) - -#def loadshowmatrices(filename, algonames = None): -# mdict = scipy.io.loadmat(filename) -# if algonames == None: -# algonames = mdict['algonames'] -# -# for algonameobj in algonames: -# algoname = algonameobj[0][0] -# print algoname -# if mdict['meanmatrix'][algoname][0,0].ndim == 2: -# plt.figure() -# plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower') -# elif mdict['meanmatrix'][algoname][0,0].ndim == 3: -# for matrix in mdict['meanmatrix'][algoname][0,0]: -# plt.figure() -# plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower') -# plt.show() -# print "Finished." -# -# -#def loadsavematrices(filename, saveplotbase, saveplotexts, algonames = None): -# -# mdict = scipy.io.loadmat(filename) -# lambdas = mdict['lambdas'] -# -# if algonames is None: -# algonames = mdict['algonames'] -# -# for algonameobj in algonames: -# algoname = algonameobj[0][0] -# print algoname -# if mdict['meanmatrix'][algoname][0,0].ndim == 2: -# plt.figure() -# plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower') -# for ext in saveplotexts: -# plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') -# elif mdict['meanmatrix'][algoname][0,0].ndim == 3: -# ilbd = 0 -# for matrix in mdict['meanmatrix'][algoname][0,0]: -# plt.figure() -# plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower') -# for ext in saveplotexts: -# plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight') -# ilbd = ilbd + 1 -# print "Finished." - -def loadmatrices(filename, algonames=None, doshow=True, dosave=False, saveplotbase=None, saveplotexts=None): - - if dosave and (saveplotbase is None or saveplotexts is None): - print('Error: please specify name and extensions for saving') - raise Exception('Name or extensions for saving not specified') - - mdict = scipy.io.loadmat(filename) - - if dosave: - lambdas = mdict['lambdas'] - - if algonames is None: - algonames = mdict['algonames'] - - for algonameobj in algonames: - algoname = algonameobj[0][0] - print algoname - if mdict['meanmatrix'][algoname][0,0].ndim == 2: - plt.figure() - plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest',origin='lower') - if dosave: - for ext in saveplotexts: - plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') - elif mdict['meanmatrix'][algoname][0,0].ndim == 3: - if dosave: - ilbd = 0 - for matrix in mdict['meanmatrix'][algoname][0,0]: - plt.figure() - plt.imshow(matrix, cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest',origin='lower') - if dosave: - for ext in saveplotexts: - plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight') - ilbd = ilbd + 1 - - if doshow: - plt.show() - print "Finished." 
- - -def loadshowmatrices_multipixels(filename, algonames = None, doshow=True, dosave=False, saveplotbase=None, saveplotexts=None): - - if dosave and (saveplotbase is None or saveplotexts is None): - print('Error: please specify name and extensions for saving') - raise Exception('Name or extensions for saving not specified') - - mdict = scipy.io.loadmat(filename) - - if dosave: - lambdas = mdict['lambdas'] - - if algonames == None: - algonames = mdict['algonames'] - -# thresh = 0.90 - N = 10 -# border = 2 -# bordercolor = 0 # black - - for algonameobj in algonames: - algoname = algonameobj[0][0] - print algoname - if mdict['meanmatrix'][algoname][0,0].ndim == 2: - - # Prepare bigger matrix - rows,cols = mdict['meanmatrix'][algoname][0,0].shape - bigmatrix = numpy.zeros((N*rows,N*cols)) - for i in numpy.arange(rows): - for j in numpy.arange(cols): - bigmatrix[i*N:i*N+N,j*N:j*N+N] = mdict['meanmatrix'][algoname][0,0][i,j] - bigmatrix = int_drawseparation(mdict['meanmatrix'][algoname][0,0],bigmatrix,10,0.9,2,0) - bigmatrix = int_drawseparation(mdict['meanmatrix'][algoname][0,0],bigmatrix,10,0.8,2,0.2) - bigmatrix = int_drawseparation(mdict['meanmatrix'][algoname][0,0],bigmatrix,10,0.5,2,1) -# # Mark 95% border -# if mdict['meanmatrix'][algoname][0,0][i,j] > thresh: -# # Top border -# if mdict['meanmatrix'][algoname][0,0][i-1,j] < thresh and i>0: -# bigmatrix[i*N:i*N+border,j*N:j*N+N] = bordercolor -# # Bottom border -# if mdict['meanmatrix'][algoname][0,0][i+1,j] < thresh and i<rows-1: -# bigmatrix[i*N+N-border:i*N+N,j*N:j*N+N] = bordercolor -# # Left border -# if mdict['meanmatrix'][algoname][0,0][i,j-1] < thresh and j>0: -# bigmatrix[i*N:i*N+N,j*N:j*N+border] = bordercolor -# # Right border (not very probable) -# if j<cols-1 and mdict['meanmatrix'][algoname][0,0][i,j+1] < thresh: -# bigmatrix[i*N:i*N+N,j*N+N-border:j*N+N] = bordercolor - - plt.figure() - #plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower') - plt.imshow(bigmatrix, cmap=cm.gray, interpolation='nearest',origin='lower') - if dosave: - for ext in saveplotexts: - plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') - elif mdict['meanmatrix'][algoname][0,0].ndim == 3: - if dosave: - ilbd = 0 - - for matrix in mdict['meanmatrix'][algoname][0,0]: - - # Prepare bigger matrix - rows,cols = matrix.shape - bigmatrix = numpy.zeros((N*rows,N*cols)) - for i in numpy.arange(rows): - for j in numpy.arange(cols): - bigmatrix[i*N:i*N+N,j*N:j*N+N] = matrix[i,j] - bigmatrix = int_drawseparation(matrix,bigmatrix,10,0.9,2,0) - bigmatrix = int_drawseparation(matrix,bigmatrix,10,0.8,2,0.2) - bigmatrix = int_drawseparation(matrix,bigmatrix,10,0.5,2,1) -# # Mark 95% border -# if matrix[i,j] > thresh: -# # Top border -# if matrix[i-1,j] < thresh and i>0: -# bigmatrix[i*N:i*N+border,j*N:j*N+N] = bordercolor -# # Bottom border -# if matrix[i+1,j] < thresh and i<rows-1: -# bigmatrix[i*N+N-border:i*N+N,j*N:j*N+N] = bordercolor -# # Left border -# if matrix[i,j-1] < thresh and j>0: -# bigmatrix[i*N:i*N+N,j*N:j*N+border] = bordercolor -# # Right border (not very probable) -# if j<cols-1 and matrix[i,j+1] < thresh: -# bigmatrix[i*N:i*N+N,j*N+N-border:j*N+N] = bordercolor - - plt.figure() - #plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower') - plt.imshow(bigmatrix, cmap=cm.gray, interpolation='nearest',origin='lower') - if dosave: - for ext in saveplotexts: - plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' 
+ ext, bbox_inches='tight') - ilbd = ilbd + 1 - if doshow: - plt.show() - print "Finished." - -def appendtomatfile(filename, toappend, toappendname): - mdict = scipy.io.loadmat(filename) - mdict[toappendname] = toappend - try: - scipy.io.savemat(filename, mdict) - except: - print "Save error" - - # To save to a cell array, create an object array: - # >>> obj_arr = np.zeros((2,), dtype=np.object) - # >>> obj_arr[0] = 1 - # >>> obj_arr[1] = 'a string' - -def int_drawseparation(matrix,bigmatrix,N,thresh,border,bordercolor): - rows,cols = matrix.shape - for i in numpy.arange(rows): - for j in numpy.arange(cols): - # Mark border - # Use top-left corner of current square for reference - if matrix[i,j] > thresh: - # Top border - if matrix[i-1,j] < thresh and i>0: - bigmatrix[i*N:i*N+border,j*N:j*N+N] = bordercolor - # Bottom border - if i<rows-1 and matrix[i+1,j] < thresh: - bigmatrix[i*N+N-border:i*N+N,j*N:j*N+N] = bordercolor - # Left border - if matrix[i,j-1] < thresh and j>0: - bigmatrix[i*N:i*N+N,j*N:j*N+border] = bordercolor - # Right border (not very probable) - if j<cols-1 and matrix[i,j+1] < thresh: - bigmatrix[i*N:i*N+N,j*N+N-border:j*N+N] = bordercolor - - return bigmatrix \ No newline at end of file
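`loadshowmatrices_multipixels` above blows each phase-plane cell up into an `N x N` block and then calls `int_drawseparation` to paint borders where the mean value crosses the 0.9, 0.8 and 0.5 thresholds. The sketch below reproduces the same idea in a self-contained way: `np.kron` performs the block copy done by the explicit double loop, and the border helper keeps the original logic but tests the `i > 0` / `j > 0` guards before the neighbour lookup (in the original the negative-index lookup is evaluated first; the `and` still rejects the case, so behaviour is identical and the reordering is only for clarity). The 4 x 4 matrix is synthetic, purely to exercise the code.

```python
# Self-contained sketch of the "multipixel" rendering used in utils.py.
import numpy as np

def draw_separation(matrix, bigmatrix, N, thresh, border, bordercolor):
    """Paint borders in bigmatrix where matrix crosses thresh (as int_drawseparation)."""
    rows, cols = matrix.shape
    for i in range(rows):
        for j in range(cols):
            if matrix[i, j] <= thresh:
                continue
            if i > 0 and matrix[i - 1, j] < thresh:          # top border
                bigmatrix[i * N:i * N + border, j * N:j * N + N] = bordercolor
            if i < rows - 1 and matrix[i + 1, j] < thresh:   # bottom border
                bigmatrix[i * N + N - border:i * N + N, j * N:j * N + N] = bordercolor
            if j > 0 and matrix[i, j - 1] < thresh:          # left border
                bigmatrix[i * N:i * N + N, j * N:j * N + border] = bordercolor
            if j < cols - 1 and matrix[i, j + 1] < thresh:   # right border
                bigmatrix[i * N:i * N + N, j * N + N - border:j * N + N] = bordercolor
    return bigmatrix

N = 10
meanmatrix = np.array([[0.1, 0.40, 0.70, 0.95],
                       [0.2, 0.60, 0.85, 0.99],
                       [0.3, 0.75, 0.92, 1.00],
                       [0.5, 0.80, 0.97, 1.00]])   # synthetic example data
bigmatrix = np.kron(meanmatrix, np.ones((N, N)))   # same result as the nested copy loop
bigmatrix = draw_separation(meanmatrix, bigmatrix, N, 0.9, 2, 0.0)
print(bigmatrix.shape)                             # (40, 40)
```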