changeset 32:e1da5140c9a5
Count and display finished jobs in run_once_tuple()
Disabled "dictionary not normalized" message in OMP
Fixed bug that displayed same values for all algorithms
Made TST default iterations nsweep = 300
author    nikcleju
date      Fri, 11 Nov 2011 15:35:55 +0000
parents   829bf04c92af
children  116dcfacd1cc
files     pyCSalgos/OMP/omp_QR.py
          scripts/ABSapprox.py
          scripts/ABSapproxSingle.py
diffstat  3 files changed, 303 insertions(+), 41 deletions(-)
--- a/pyCSalgos/OMP/omp_QR.py	Thu Nov 10 22:48:39 2011 +0000
+++ b/pyCSalgos/OMP/omp_QR.py	Fri Nov 11 15:35:55 2011 +0000
@@ -302,10 +302,9 @@
 # end
 mask = np.zeros(m)
 mask[math.floor(np.random.rand() * m)] = 1
-nP = np.linalg.norm(P(mask))
-if abs(1-nP) > 1e-3:
-  print 'Dictionary appears not to have unit norm columns.'
-#end
+#nP = np.linalg.norm(P(mask))
+#if abs(1-nP) > 1e-3:
+#  print 'Dictionary appears not to have unit norm columns.'
 
 ###########################################################################
 # Check if we have enough memory and initialise
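For reference, the disabled check applies the dictionary operator P to one random standard-basis vector and warns if the resulting column does not have norm close to 1. Below is a minimal standalone sketch of the same idea for an explicit dictionary matrix D, checking every column rather than one sampled at random; the function names are illustrative and not part of the repository. Disabling the warning (rather than normalizing) presumably suits dictionaries that are deliberately not unit-norm, such as the lambda-weighted aggregated dictionaries built by the ABS scripts below.

  import numpy as np

  def has_unit_norm_columns(D, tol=1e-3):
    # l2 norm of every column; for a normalized dictionary all are ~1
    norms = np.sqrt(np.sum(D**2, axis=0))
    return np.all(np.abs(1.0 - norms) <= tol)

  def normalize_columns(D):
    # Illustrative alternative: rescale each column to unit l2 norm
    # instead of merely printing a warning
    return D / np.sqrt(np.sum(D**2, axis=0))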
--- a/scripts/ABSapprox.py	Thu Nov 10 22:48:39 2011 +0000
+++ b/scripts/ABSapprox.py	Fri Nov 11 15:35:55 2011 +0000
@@ -83,7 +83,9 @@
   aggD = np.concatenate((aggDupper, lbd * aggDlower))
   aggy = np.concatenate((y, np.zeros(N-n)))
 
-  return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep=3000, tol=epsilon / np.linalg.norm(aggy))
+  nsweep = 300
+  tol = epsilon / np.linalg.norm(aggy)
+  return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep=nsweep, tol=tol)
 
 #==========================
 # Define tuples (algorithm function, name)
@@ -98,7 +100,20 @@
 # 1. Algorithms not depending on lambda
 algosN = gap,  # tuple
 # 2. Algorithms depending on lambda (our ABS approach)
-algosL = sl0,bp,ompeps,tst  # tuple
+#algosL = sl0,bp,ompeps,tst  # tuple
+algosL = sl0,tst
+
+#==========================
+# Pool initializer function (multiprocessing)
+# Needed to pass the shared variable to the worker processes
+# The variables must be global in the module in order to be seen later in run_once_tuple()
+# see http://stackoverflow.com/questions/1675766/how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing
+#==========================
+def initProcess(share, njobs):
+  import sys
+  currmodule = sys.modules[__name__]
+  currmodule.proccount = share
+  currmodule.njobs = njobs
 
 #==========================
 # Interface functions
@@ -119,10 +134,10 @@
 
   #Set up standard experiment parameters
   d = 50.0; sigma = 2.0
-  deltas = np.arange(0.05,1.,0.05)
-  rhos = np.arange(0.05,1.,0.05)
-  #deltas = np.array([0.05, 0.45, 0.95])
-  #rhos = np.array([0.05, 0.45, 0.95])
+  #deltas = np.arange(0.05,1.,0.05)
+  #rhos = np.arange(0.05,1.,0.05)
+  deltas = np.array([0.05, 0.45, 0.95])
+  rhos = np.array([0.05, 0.45, 0.95])
   #deltas = np.array([0.05])
   #rhos = np.array([0.05])
   #delta = 0.8;
@@ -158,7 +173,12 @@
 
   print "Running phase transition ( run_multi() )"
   if doparallel:
-    from multiprocessing import Pool
+    import multiprocessing
+    # Shared value holding the number of finished processes
+    # Add it as global of the module
+    import sys
+    currmodule = sys.modules[__name__]
+    currmodule.proccount = multiprocessing.Value('I', 0)  # 'I' = unsigned int, see docs (multiprocessing, array)
 
   if dosaveplot or doshowplot:
     try:
@@ -218,14 +238,17 @@
       #Save the parameters, and run after
       print " delta = ",delta," rho = ",rho
       jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))
-
+
+  if doparallel:
+    currmodule.njobs = deltas.size * rhos.size
   print "End of parameters"
 
   # Run
   jobresults = []
+
   if doparallel:
-    pool = Pool(4)
-    jobresults = pool.map(run_once_tuple,jobparams)
+    pool = multiprocessing.Pool(4,initializer=initProcess,initargs=(currmodule.proccount,currmodule.njobs))
+    jobresults = pool.map(run_once_tuple, jobparams)
   else:
     for jobparam in jobparams:
       jobresults.append(run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0))
@@ -245,29 +268,22 @@
         meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
         if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
           meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
-
-  # # Prepare matrices to show
-  # showmats = dict()
-  # for i,algo in zip(np.arange(nalgosN),algosN):
-  #   showmats[algo[1]] = np.zeros(rhos.size, deltas.size)
-  # for i,algo in zip(np.arange(nalgosL),algosL):
-  #   showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size)
-    # Save
-    if dosavedata:
-      tosave = dict()
-      tosave['meanmatrix'] = meanmatrix
-      tosave['d'] = d
-      tosave['sigma'] = sigma
-      tosave['deltas'] = deltas
-      tosave['rhos'] = rhos
-      tosave['numvects'] = numvects
-      tosave['SNRdb'] = SNRdb
-      tosave['lambdas'] = lambdas
-      try:
-        scipy.io.savemat(savedataname, tosave)
-      except:
-        print "Save error"
+  # Save
+  if dosavedata:
+    tosave = dict()
+    tosave['meanmatrix'] = meanmatrix
+    tosave['d'] = d
+    tosave['sigma'] = sigma
+    tosave['deltas'] = deltas
+    tosave['rhos'] = rhos
+    tosave['numvects'] = numvects
+    tosave['SNRdb'] = SNRdb
+    tosave['lambdas'] = lambdas
+    try:
+      scipy.io.savemat(savedataname, tosave)
+    except:
+      print "Save error"
 
   # Show
   if doshowplot or dosaveplot:
     for algotuple in algosN:
@@ -291,7 +307,14 @@
   print "Finished."
 
 def run_once_tuple(t):
-  return run_once(*t)
+  results = run_once(*t)
+  import sys
+  currmodule = sys.modules[__name__]
+  currmodule.proccount.value = currmodule.proccount.value + 1
+  print "================================"
+  print "Finished job",currmodule.proccount.value,"of",currmodule.njobs
+  print "================================"
+  return results
 
 def run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
 
@@ -322,8 +345,8 @@
       xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
       err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
       relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
-  for algotuple in algosN:
-    print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])
+  for algofunc,strname in algosN:
+    print strname,' : avg relative error = ',np.mean(relerr[strname])
 
   # Run algorithms with Lambda
   for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
@@ -337,8 +360,8 @@
         err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
         relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
     print 'Lambda = ',lbd,' :'
-    for algotuple in algosL:
-      print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
+    for algofunc,strname in algosL:
+      print ' ',strname,' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
 
   # Prepare results
   mrelerrN = dict()
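Two things happen in this file. First, the progress counter named in the commit message: a multiprocessing.Value('I', 0) is created in the parent, handed to every worker through the Pool initializer initProcess(), and incremented in run_once_tuple() so each finished job prints "Finished job k of n". Second, the last two hunks fix the "same values for all algorithms" bug: the summary loops iterated with algotuple but printed np.mean(relerr[strname]), where strname was whatever the preceding inner loop left behind, so every algorithm reported the same number. Below is a self-contained sketch of the counter pattern in the script's Python 2 style; work() is a hypothetical stand-in for run_once_tuple(), and the get_lock() guard is an addition of this sketch (the changeset's unguarded increment could in principle race between workers).

  import multiprocessing
  import sys

  def initProcess(share, njobs):
    # Pool initializer: runs once in each worker and stores the shared
    # counter and the job total as globals of this module, so the mapped
    # function can reach them.
    currmodule = sys.modules[__name__]
    currmodule.proccount = share
    currmodule.njobs = njobs

  def work(t):
    # Hypothetical stand-in for run_once_tuple(t)
    result = t * t
    with proccount.get_lock():   # guard the read-modify-write
      proccount.value += 1
      print "Finished job", proccount.value, "of", njobs
    return result

  if __name__ == "__main__":
    counter = multiprocessing.Value('I', 0)  # 'I' = unsigned int
    ntasks = 8
    pool = multiprocessing.Pool(4, initializer=initProcess,
                                initargs=(counter, ntasks))
    print pool.map(work, range(ntasks))

Plain module globals would not survive into the workers' map calls on platforms that spawn rather than fork, which is why the shared Value travels through initargs rather than being captured directly.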
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/ABSapproxSingle.py	Fri Nov 11 15:35:55 2011 +0000
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Nov 05 18:08:40 2011
+
+@author: Nic
+"""
+
+import numpy as np
+import scipy.io
+import math
+doplot = True
+try:
+  import matplotlib.pyplot as plt
+except:
+  doplot = False
+if doplot:
+  import matplotlib.cm as cm
+import pyCSalgos
+import pyCSalgos.GAP.GAP
+import pyCSalgos.SL0.SL0_approx
+
+# Define functions that prepare arguments for each algorithm call
+def run_gap(y,M,Omega,epsilon):
+  gapparams = {"num_iteration" : 1000,\
+               "greedy_level" : 0.9,\
+               "stopping_coefficient_size" : 1e-4,\
+               "l2solver" : 'pseudoinverse',\
+               "noise_level": epsilon}
+  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
+
+def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+
+  N,n = Omega.shape
+  #D = np.linalg.pinv(Omega)
+  #U,S,Vt = np.linalg.svd(D)
+  aggDupper = np.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = np.concatenate((aggDupper, lbd * aggDlower))
+  aggy = np.concatenate((y, np.zeros(N-n)))
+
+  sigmamin = 0.001
+  sigma_decrease_factor = 0.5
+  mu_0 = 2
+  L = 10
+  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+
+  N,n = Omega.shape
+  #D = np.linalg.pinv(Omega)
+  #U,S,Vt = np.linalg.svd(D)
+  aggDupper = np.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = np.concatenate((aggDupper, lbd * aggDlower))
+  aggy = np.concatenate((y, np.zeros(N-n)))
+
+  sigmamin = 0.001
+  sigma_decrease_factor = 0.5
+  mu_0 = 2
+  L = 10
+  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+
+# Define tuples (algorithm setup function, algorithm function, name)
+gap = (run_gap, 'GAP')
+sl0 = (run_sl0, 'SL0_approx')
+
+# Define which algorithms to run
+# 1. Algorithms not depending on lambda
+algosN = gap,  # tuple
+# 2. Algorithms depending on lambda (our ABS approach)
+algosL = sl0,  # tuple
+
+def mainrun():
+
+  nalgosN = len(algosN)
+  nalgosL = len(algosL)
+
+  #Set up experiment parameters
+  d = 50.0;
+  sigma = 2.0
+  #deltas = np.arange(0.05,1.,0.05)
+  #rhos = np.arange(0.05,1.,0.05)
+  deltas = np.array([0.05, 0.45, 0.95])
+  rhos = np.array([0.05, 0.45, 0.95])
+  #deltas = np.array([0.05])
+  #rhos = np.array([0.05])
+  #delta = 0.8;
+  #rho = 0.15;
+  numvects = 100; # Number of vectors to generate
+  SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
+  # Values for lambda
+  #lambdas = [0 10.^linspace(-5, 4, 10)];
+  #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
+  lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000])
+
+  meanmatrix = dict()
+  for i,algo in zip(np.arange(nalgosN),algosN):
+    meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size))
+  for i,algo in zip(np.arange(nalgosL),algosL):
+    meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size))
+
+  for idelta,delta in zip(np.arange(deltas.size),deltas):
+    for irho,rho in zip(np.arange(rhos.size),rhos):
+
+      # Generate data and operator
+      Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
+
+      # Run algorithms
+      print "***** delta = ",delta," rho = ",rho
+      mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
+
+      for algotuple in algosN:
+        meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
+        if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
+          meanmatrix[algotuple[1]][irho,idelta] = 0
+      for algotuple in algosL:
+        for ilbd in np.arange(lambdas.size):
+          meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
+          if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
+            meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
+
+  # # Prepare matrices to show
+  # showmats = dict()
+  # for i,algo in zip(np.arange(nalgosN),algosN):
+  #   showmats[algo[1]] = np.zeros(rhos.size, deltas.size)
+  # for i,algo in zip(np.arange(nalgosL),algosL):
+  #   showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size)
+
+  # Save
+  tosave = dict()
+  tosave['meanmatrix'] = meanmatrix
+  tosave['d'] = d
+  tosave['sigma'] = sigma
+  tosave['deltas'] = deltas
+  tosave['rhos'] = rhos
+  tosave['numvects'] = numvects
+  tosave['SNRdb'] = SNRdb
+  tosave['lambdas'] = lambdas
+  try:
+    scipy.io.savemat('ABSapprox.mat',tosave)
+  except TypeError:
+    print "Oops, Type Error"
+    raise
+
+  # Show
+  if doplot:
+    for algotuple in algosN:
+      plt.figure()
+      plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower')
+    for algotuple in algosL:
+      for ilbd in np.arange(lambdas.size):
+        plt.figure()
+        plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower')
+    plt.show()
+  print "Finished."
+
+def genData(d,sigma,delta,rho,numvects,SNRdb):
+
+  # Process parameters
+  noiselevel = 1.0 / (10.0**(SNRdb/10.0));
+  p = round(sigma*d);
+  m = round(delta*d);
+  l = round(d - rho*m);
+
+  # Generate Omega and data based on parameters
+  Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
+  # Optionally make Omega more coherent
+  U,S,Vt = np.linalg.svd(Omega);
+  Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega!
+  Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
+  Omega = np.dot(U , np.dot(Snew,Vt))
+
+  # Generate data
+  x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
+
+  return Omega,x0,y,M,realnoise
+
+def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
+
+  d = Omega.shape[1]
+
+  nalgosN = len(algosN)
+  nalgosL = len(algosL)
+
+  xrec = dict()
+  err = dict()
+  relerr = dict()
+
+  # Prepare storage variables for algorithms non-Lambda
+  for i,algo in zip(np.arange(nalgosN),algosN):
+    xrec[algo[1]] = np.zeros((d, y.shape[1]))
+    err[algo[1]] = np.zeros(y.shape[1])
+    relerr[algo[1]] = np.zeros(y.shape[1])
+  # Prepare storage variables for algorithms with Lambda
+  for i,algo in zip(np.arange(nalgosL),algosL):
+    xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1]))
+    err[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
+    relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
+
+  # Run algorithms non-Lambda
+  for iy in np.arange(y.shape[1]):
+    for algofunc,strname in algosN:
+      epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+      xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
+      err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
+      relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
+  for algotuple in algosN:
+    print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])
+
+  # Run algorithms with Lambda
+  for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
+    for iy in np.arange(y.shape[1]):
+      D = np.linalg.pinv(Omega)
+      U,S,Vt = np.linalg.svd(D)
+      for algofunc,strname in algosL:
+        epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+        gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
+        xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
+        err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
+        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
+    print 'Lambda = ',lbd,' :'
+    for algotuple in algosL:
+      print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
+
+  # Prepare results
+  mrelerrN = dict()
+  for algotuple in algosN:
+    mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
+  mrelerrL = dict()
+  for algotuple in algosL:
+    mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1)
+
+  return mrelerrN,mrelerrL
+
+# Script main
+if __name__ == "__main__":
+
+  #import cProfile
+  #cProfile.run('mainrun()', 'profile')
+  mainrun()
\ No newline at end of file
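Two remarks on the new file as committed. First, run_bp calls pyCSalgos.SL0.SL0_approx.SL0_approx, so despite its name it is currently a copy of run_sl0 rather than a basis-pursuit solver; and the stale-strname printing bug fixed above in ABSapprox.py is still present in this file's runonce(). Second, the core construction shared by run_sl0 and run_bp (and by their counterparts in ABSapprox.py) is the lambda-weighted aggregation of the ABS approach: the analysis problem with operator Omega is turned into a synthesis problem over D = pinv(Omega), with the null-space rows of Vt appended as a penalty weighted by lambda. A minimal sketch of just that step, factored into a helper whose name is illustrative:

  import numpy as np

  def abs_aggregate(y, M, Omega, lbd):
    # Omega is the N x n analysis operator, M the m x n measurement
    # matrix, y the length-m measurement vector.
    N, n = Omega.shape
    D = np.linalg.pinv(Omega)        # n x N synthesis dictionary
    U, S, Vt = np.linalg.svd(D)
    aggDupper = np.dot(M, D)         # measurement part, m x N
    aggDlower = Vt[-(N-n):, :]       # rows of Vt spanning the null space of D
    aggD = np.concatenate((aggDupper, lbd * aggDlower))
    aggy = np.concatenate((y, np.zeros(N-n)))
    return D, aggD, aggy

Any approximate synthesis solver can then be run on (aggD, aggy), and the reconstruction in the original coordinates is x = np.dot(D, gamma), exactly as runonce() does with the returned gamma.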