changeset 0:cd7cbcb03d49

Changed dictionary structure

| author   | nikcleju |
|----------|----------|
| date     | Wed, 14 Dec 2011 14:37:20 +0000 |
| parents  |  |
| children | 4f74c1d0f957 |
| files    | ABSapprox.py ABSapproxMultiproc.py ABSapproxPP.py ABSapproxSingle.py algos.py stdparams.py utils.py |
| diffstat | 7 files changed, 1722 insertions(+), 0 deletions(-) |
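All seven files are new in this changeset. Judging from the `__main__` blocks and the presets defined in stdparams.py, a typical session driving these scripts would look roughly like the sketch below; the choice of preset, the CPU count and the result file name are only illustrative (taken from the defaults visible in the diffs), not a documented interface.

```python
# Minimal usage sketch based on the entry points visible in this changeset.
import stdparams
import ABSapprox
import utils

# Quick single-process test run with the small "stdtest" preset
ABSapprox.run(stdparams.stdtest)

# Full phase-transition run using multiprocessing (preset std1 saves approx_pt_std1.mat)
ABSapprox.run_mp(stdparams.std1, ncpus=4)

# Afterwards, load and display the saved phase-transition matrices
utils.loadmatrices('approx_pt_std1.mat', doshow=True)
```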
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/ABSapprox.py Wed Dec 14 14:37:20 2011 +0000 @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat Nov 05 18:08:40 2011 + +@author: Nic +""" + +import numpy as np +import scipy.io +import math +import os +import time + +import stdparams +import pyCSalgos.Analysis + +#========================== +# Pool initializer function (multiprocessing) +# Needed to pass the shared variable to the worker processes +# The variables must be global in the module in order to be seen later in run_once_tuple() +# see http://stackoverflow.com/questions/1675766/how-to-combine-pool-map-with-array-shared-memory-in-python-multiprocessing +#========================== +def initProcess(share, njobs): + import sys + currmodule = sys.modules[__name__] + currmodule.proccount = share + currmodule.njobs = njobs + +#========================== +# Interface run functions +#========================== +def run_mp(std=stdparams.std2,ncpus=None): + + algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,doshowplot,dosaveplot,saveplotbase,saveplotexts = std() + run_multi(algosN, algosL, d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata=dosavedata,savedataname=savedataname,\ + doparallel=True, ncpus=ncpus,\ + doshowplot=doshowplot,dosaveplot=dosaveplot,saveplotbase=saveplotbase,saveplotexts=saveplotexts) + +def run(std=stdparams.std2): + algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,doshowplot,dosaveplot,saveplotbase,saveplotexts = std() + run_multi(algosN, algosL, d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata=dosavedata,savedataname=savedataname,\ + doparallel=False,\ + doshowplot=doshowplot,dosaveplot=dosaveplot,saveplotbase=saveplotbase,saveplotexts=saveplotexts) +#========================== +# Main functions +#========================== +def run_multi(algosN, algosL, d, sigma, deltas, rhos, lambdas, numvects, SNRdb, + doparallel=False, ncpus=None,\ + doshowplot=False, dosaveplot=False, saveplotbase=None, saveplotexts=None,\ + dosavedata=False, savedataname=None): + + print "This is analysis recovery ABS approximation script by Nic" + print "Running phase transition ( run_multi() )" + + # Not only for parallel + #if doparallel: + import multiprocessing + # Shared value holding the number of finished processes + # Add it as global of the module + import sys + currmodule = sys.modules[__name__] + currmodule.proccount = multiprocessing.Value('I', 0) # 'I' = unsigned int, see docs (multiprocessing, array) + + if dosaveplot or doshowplot: + try: + import matplotlib + if doshowplot or os.name == 'nt': + print "Importing matplotlib with default (GUI) backend... ", + else: + print "Importing matplotlib with \"Cairo\" backend... ", + matplotlib.use('Cairo') + import matplotlib.pyplot as plt + import matplotlib.cm as cm + print "OK" + except: + print "FAIL" + print "Importing matplotlib.pyplot failed. 
No figures at all" + print "Try selecting a different backend" + doshowplot = False + dosaveplot = False + + # Print summary of parameters + print "Parameters:" + if doparallel: + if ncpus is None: + print " Running in parallel with default threads using \"multiprocessing\" package" + else: + print " Running in parallel with",ncpus,"threads using \"multiprocessing\" package" + else: + print "Running single thread" + if doshowplot: + print " Showing figures" + else: + print " Not showing figures" + if dosaveplot: + print " Saving figures as "+saveplotbase+"* with extensions ",saveplotexts + else: + print " Not saving figures" + print " Running algorithms",[algotuple[1] for algotuple in algosN],[algotuple[1] for algotuple in algosL] + + nalgosN = len(algosN) + nalgosL = len(algosL) + + meanmatrix = dict() + elapsed = dict() + for i,algo in zip(np.arange(nalgosN),algosN): + meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) + elapsed[algo[1]] = 0 + for i,algo in zip(np.arange(nalgosL),algosL): + meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) + elapsed[algo[1]] = np.zeros(lambdas.size) + + # Prepare parameters + jobparams = [] + print " (delta, rho) pairs to be run:" + for idelta,delta in zip(np.arange(deltas.size),deltas): + for irho,rho in zip(np.arange(rhos.size),rhos): + + # Generate data and operator + Omega,x0,y,M,realnoise = generateData(d,sigma,delta,rho,numvects,SNRdb) + + #Save the parameters, and run after + print " delta = ",delta," rho = ",rho + jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0)) + + # Not only for parallel + #if doparallel: + currmodule.njobs = deltas.size * rhos.size + print "End of parameters" + + # Run + jobresults = [] + + if doparallel: + pool = multiprocessing.Pool(4,initializer=initProcess,initargs=(currmodule.proccount,currmodule.njobs)) + jobresults = pool.map(run_once_tuple, jobparams) + else: + for jobparam in jobparams: + jobresults.append(run_once_tuple(jobparam)) + + # Read results + idx = 0 + for idelta,delta in zip(np.arange(deltas.size),deltas): + for irho,rho in zip(np.arange(rhos.size),rhos): + mrelerrN,mrelerrL,addelapsed = jobresults[idx] + idx = idx+1 + for algotuple in algosN: + meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] + if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): + meanmatrix[algotuple[1]][irho,idelta] = 0 + elapsed[algotuple[1]] = elapsed[algotuple[1]] + addelapsed[algotuple[1]] + for algotuple in algosL: + for ilbd in np.arange(lambdas.size): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] + if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 + elapsed[algotuple[1]][ilbd] = elapsed[algotuple[1]][ilbd] + addelapsed[algotuple[1]][ilbd] + + # Save + if dosavedata: + tosave = dict() + tosave['meanmatrix'] = meanmatrix + tosave['elapsed'] = elapsed + tosave['d'] = d + tosave['sigma'] = sigma + tosave['deltas'] = deltas + tosave['rhos'] = rhos + tosave['numvects'] = numvects + tosave['SNRdb'] = SNRdb + tosave['lambdas'] = lambdas + # Save algo names as cell array + obj_arr = np.zeros((len(algosN)+len(algosL),), dtype=np.object) + idx = 0 + for algotuple in algosN+algosL: + obj_arr[idx] = algotuple[1] + idx = idx+1 + tosave['algonames'] = obj_arr + try: + scipy.io.savemat(savedataname, tosave) + except: + print "Save error" + # Show + if doshowplot or dosaveplot: + for algotuple in 
algosN: + algoname = algotuple[1] + plt.figure() + plt.imshow(meanmatrix[algoname], cmap=cm.gray, interpolation='nearest',origin='lower') + if dosaveplot: + for ext in saveplotexts: + plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') + for algotuple in algosL: + algoname = algotuple[1] + for ilbd in np.arange(lambdas.size): + plt.figure() + plt.imshow(meanmatrix[algoname][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower') + if dosaveplot: + for ext in saveplotexts: + plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight') + if doshowplot: + plt.show() + + print "Finished." + +def run_once_tuple(t): + results = run_once(*t) + import sys + currmodule = sys.modules[__name__] + currmodule.proccount.value = currmodule.proccount.value + 1 + print "================================" + print "Finished job",currmodule.proccount.value,"of",currmodule.njobs + print "================================" + return results + +def run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): + + d = Omega.shape[1] + + nalgosN = len(algosN) + nalgosL = len(algosL) + + + xrec = dict() + err = dict() + relerr = dict() + elapsed = dict() + + # Prepare storage variables for algorithms non-Lambda + for i,algo in zip(np.arange(nalgosN),algosN): + xrec[algo[1]] = np.zeros((d, y.shape[1])) + err[algo[1]] = np.zeros(y.shape[1]) + relerr[algo[1]] = np.zeros(y.shape[1]) + elapsed[algo[1]] = 0 + # Prepare storage variables for algorithms with Lambda + for i,algo in zip(np.arange(nalgosL),algosL): + xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) + err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) + relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) + elapsed[algo[1]] = np.zeros(lambdas.size) + + # Run algorithms non-Lambda + for iy in np.arange(y.shape[1]): + for algofunc,strname in algosN: + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + try: + timestart = time.time() + xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) + elapsed[strname] = elapsed[strname] + (time.time() - timestart) + except pyCSalgos.BP.l1qec.l1qecInputValueError as e: + print "Caught exception when running algorithm",strname," :",e.message + except pyCSalgos.NESTA.NESTA.NestaError as e: + print "Caught exception when running algorithm",strname," :",e.message + err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) + relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) + for algofunc,strname in algosN: + print strname,' : avg relative error = ',np.mean(relerr[strname]) + + # Run algorithms with Lambda + for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): + for iy in np.arange(y.shape[1]): + D = np.linalg.pinv(Omega) + U,S,Vt = np.linalg.svd(D) + for algofunc,strname in algosL: + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + try: + timestart = time.time() + gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) + elapsed[strname][ilbd] = elapsed[strname][ilbd] + (time.time() - timestart) + except pyCSalgos.BP.l1qc.l1qcInputValueError as e: + print "Caught exception when running algorithm",strname," :",e.message + xrec[strname][ilbd,:,iy] = np.dot(D,gamma) + err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) + relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) + print 'Lambda = ',lbd,' :' + for algofunc,strname in algosL: + print ' ',strname,' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) + + # Prepare results + mrelerrN = dict() + for algotuple in algosN: + 
mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) + mrelerrL = dict() + for algotuple in algosL: + mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) + + return mrelerrN,mrelerrL,elapsed + + + +def generateData(d,sigma,delta,rho,numvects,SNRdb): + + # Process parameters + noiselevel = 1.0 / (10.0**(SNRdb/10.0)); + p = round(sigma*d); + m = round(delta*d); + l = round(d - rho*m); + + # Generate Omega and data based on parameters + Omega = pyCSalgos.Analysis.Generate_Analysis_Operator(d, p); + # Optionally make Omega more coherent + U,S,Vt = np.linalg.svd(Omega); + Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega! + Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) + Omega = np.dot(U , np.dot(Snew,Vt)) + + # Generate data + x0,y,M,Lambda,realnoise = pyCSalgos.Analysis.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); + + return Omega,x0,y,M,realnoise + + +def runsingleexampledebug(): + d = 50.0; + sigma = 2.0 + delta = 0.9 + rho = 0.05 + numvects = 20; # Number of vectors to generate + SNRdb = 7.; # This is norm(signal)/norm(noise), so power, not energy + lbd = 10000 + + Omega,x0,y,M,realnoise = generateData(d,sigma,delta,rho,numvects,SNRdb) + D = np.linalg.pinv(Omega) + U,S,Vt = np.linalg.svd(D) + + xrec = np.zeros((d, y.shape[1])) + err = np.zeros((y.shape[1])) + relerr = np.zeros((y.shape[1])) + + for iy in np.arange(y.shape[1]): + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + gamma = run_sl0(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) + xrec[:,iy] = np.dot(D,gamma) + err[iy] = np.linalg.norm(x0[:,iy] - xrec[:,iy]) + relerr[iy] = err[iy] / np.linalg.norm(x0[:,iy]) + + print "Finished runsingleexampledebug()" + +# Script main +if __name__ == "__main__": + #import cProfile + #cProfile.run('mainrun()', 'profile') + run(stdparams.stdtest) + #runsingleexampledebug()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/ABSapproxMultiproc.py Wed Dec 14 14:37:20 2011 +0000 @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat Nov 05 18:08:40 2011 + +@author: Nic +""" + +import numpy as np +import scipy.io +import math +from multiprocessing import Pool +doplot = True +try: + import matplotlib.pyplot as plt +except: + doplot = False +if doplot: + import matplotlib.cm as cm +import pyCSalgos +import pyCSalgos.GAP.GAP +import pyCSalgos.SL0.SL0_approx + +# Define functions that prepare arguments for each algorithm call +def run_gap(y,M,Omega,epsilon): + gapparams = {"num_iteration" : 1000,\ + "greedy_level" : 0.9,\ + "stopping_coefficient_size" : 1e-4,\ + "l2solver" : 'pseudoinverse',\ + "noise_level": epsilon} + return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0] + +def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): + + N,n = Omega.shape + #D = np.linalg.pinv(Omega) + #U,S,Vt = np.linalg.svd(D) + aggDupper = np.dot(M,D) + aggDlower = Vt[-(N-n):,:] + aggD = np.concatenate((aggDupper, lbd * aggDlower)) + aggy = np.concatenate((y, np.zeros(N-n))) + + sigmamin = 0.001 + sigma_decrease_factor = 0.5 + mu_0 = 2 + L = 10 + return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) + +def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd): + + N,n = Omega.shape + #D = np.linalg.pinv(Omega) + #U,S,Vt = np.linalg.svd(D) + aggDupper = np.dot(M,D) + aggDlower = Vt[-(N-n):,:] + aggD = np.concatenate((aggDupper, lbd * aggDlower)) + aggy = np.concatenate((y, np.zeros(N-n))) + + sigmamin = 0.001 + sigma_decrease_factor = 0.5 + mu_0 = 2 + L = 10 + return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) + + +# Define tuples (algorithm setup function, algorithm function, name) +gap = (run_gap, 'GAP') +sl0 = (run_sl0, 'SL0_approx') + +# Define which algorithms to run +# 1. Algorithms not depending on lambda +algosN = gap, # tuple +# 2. 
Algorithms depending on lambda (our ABS approach) +algosL = sl0, # tuple + +def mainrun(): + + nalgosN = len(algosN) + nalgosL = len(algosL) + + #Set up experiment parameters + d = 50.0; + sigma = 2.0 + #deltas = np.arange(0.05,1.,0.05) + #rhos = np.arange(0.05,1.,0.05) + deltas = np.array([0.05, 0.45, 0.95]) + rhos = np.array([0.05, 0.45, 0.95]) + #deltas = np.array([0.05]) + #rhos = np.array([0.05]) + #delta = 0.8; + #rho = 0.15; + numvects = 100; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10))) + lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000]) + + meanmatrix = dict() + for i,algo in zip(np.arange(nalgosN),algosN): + meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) + for i,algo in zip(np.arange(nalgosL),algosL): + meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) + + jobparams = [] + for idelta,delta in zip(np.arange(deltas.size),deltas): + for irho,rho in zip(np.arange(rhos.size),rhos): + + # Generate data and operator + Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) + + # Run algorithms + print "***** delta = ",delta," rho = ",rho + #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) + jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0)) + + pool = Pool(4) + jobresults = pool.map(runoncetuple,jobparams) + + idx = 0 + for idelta,delta in zip(np.arange(deltas.size),deltas): + for irho,rho in zip(np.arange(rhos.size),rhos): + mrelerrN,mrelerrL = jobresults[idx] + idx = idx+1 + for algotuple in algosN: + meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] + if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): + meanmatrix[algotuple[1]][irho,idelta] = 0 + for algotuple in algosL: + for ilbd in np.arange(lambdas.size): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] + if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 + + # # Prepare matrices to show + # showmats = dict() + # for i,algo in zip(np.arange(nalgosN),algosN): + # showmats[algo[1]] = np.zeros(rhos.size, deltas.size) + # for i,algo in zip(np.arange(nalgosL),algosL): + # showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size) + + # Save + tosave = dict() + tosave['meanmatrix'] = meanmatrix + tosave['d'] = d + tosave['sigma'] = sigma + tosave['deltas'] = deltas + tosave['rhos'] = rhos + tosave['numvects'] = numvects + tosave['SNRdb'] = SNRdb + tosave['lambdas'] = lambdas + try: + scipy.io.savemat('ABSapprox.mat',tosave) + except TypeError: + print "Oops, Type Error" + raise + # Show + if doplot: + for algotuple in algosN: + plt.figure() + plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower') + for algotuple in algosL: + for ilbd in np.arange(lambdas.size): + plt.figure() + plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower') + plt.show() + print "Finished." 
+ +def genData(d,sigma,delta,rho,numvects,SNRdb): + + # Process parameters + noiselevel = 1.0 / (10.0**(SNRdb/10.0)); + p = round(sigma*d); + m = round(delta*d); + l = round(d - rho*m); + + # Generate Omega and data based on parameters + Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p); + # Optionally make Omega more coherent + U,S,Vt = np.linalg.svd(Omega); + Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega! + Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) + Omega = np.dot(U , np.dot(Snew,Vt)) + + # Generate data + x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); + + return Omega,x0,y,M,realnoise + +def runoncetuple(t): + return runonce(*t) + +def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): + + d = Omega.shape[1] + + nalgosN = len(algosN) + nalgosL = len(algosL) + + xrec = dict() + err = dict() + relerr = dict() + + # Prepare storage variables for algorithms non-Lambda + for i,algo in zip(np.arange(nalgosN),algosN): + xrec[algo[1]] = np.zeros((d, y.shape[1])) + err[algo[1]] = np.zeros(y.shape[1]) + relerr[algo[1]] = np.zeros(y.shape[1]) + # Prepare storage variables for algorithms with Lambda + for i,algo in zip(np.arange(nalgosL),algosL): + xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) + err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) + relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) + + # Run algorithms non-Lambda + for iy in np.arange(y.shape[1]): + for algofunc,strname in algosN: + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) + err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) + relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) + for algotuple in algosN: + print algotuple[1],' : avg relative error = ',np.mean(relerr[strname]) + + # Run algorithms with Lambda + for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): + for iy in np.arange(y.shape[1]): + D = np.linalg.pinv(Omega) + U,S,Vt = np.linalg.svd(D) + for algofunc,strname in algosL: + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) + xrec[strname][ilbd,:,iy] = np.dot(D,gamma) + err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) + relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) + print 'Lambda = ',lbd,' :' + for algotuple in algosL: + print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) + + # Prepare results + mrelerrN = dict() + for algotuple in algosN: + mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) + mrelerrL = dict() + for algotuple in algosL: + mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) + + return mrelerrN,mrelerrL + +# Script main +if __name__ == "__main__": + #import cProfile + #cProfile.run('mainrun()', 'profile') + + mainrun()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/ABSapproxPP.py Wed Dec 14 14:37:20 2011 +0000 @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat Nov 05 18:08:40 2011 + +@author: Nic +""" + +import numpy +import scipy.io +import math +#import matplotlib.pyplot as plt +#import matplotlib.cm as cm +import pp +import pyCSalgos +import pyCSalgos.GAP.GAP +import pyCSalgos.SL0.SL0_approx + +# Define functions that prepare arguments for each algorithm call +def run_gap(y,M,Omega,epsilon): + gapparams = {"num_iteration" : 1000,\ + "greedy_level" : 0.9,\ + "stopping_coefficient_size" : 1e-4,\ + "l2solver" : 'pseudoinverse',\ + "noise_level": epsilon} + return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,numpy.zeros(Omega.shape[1]))[0] + +def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): + + N,n = Omega.shape + #D = numpy.linalg.pinv(Omega) + #U,S,Vt = numpy.linalg.svd(D) + aggDupper = numpy.dot(M,D) + aggDlower = Vt[-(N-n):,:] + aggD = numpy.concatenate((aggDupper, lbd * aggDlower)) + aggy = numpy.concatenate((y, numpy.zeros(N-n))) + + sigmamin = 0.001 + sigma_decrease_factor = 0.5 + mu_0 = 2 + L = 10 + return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) + +# Define tuples (algorithm setup function, algorithm function, name) +gap = (run_gap, 'GAP') +sl0 = (run_sl0, 'SL0_approx') + +# Define which algorithms to run +# 1. Algorithms not depending on lambda +algosN = gap, # tuple +# 2. Algorithms depending on lambda (our ABS approach) +algosL = sl0, # tuple + +def mainrun(): + + nalgosN = len(algosN) + nalgosL = len(algosL) + + #Set up experiment parameters + d = 50; + sigma = 2.0 + #deltas = numpy.arange(0.05,0.95,0.05) + #rhos = numpy.arange(0.05,0.95,0.05) + deltas = numpy.array([0.05, 0.45, 0.95]) + rhos = numpy.array([0.05, 0.45, 0.95]) + #deltas = numpy.array([0.05]) + #rhos = numpy.array([0.05]) + #delta = 0.8; + #rho = 0.15; + numvects = 10; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.concatenate((numpy.array([0]), 10**numpy.linspace(-5, 4, 10))) + + meanmatrix = dict() + for i,algo in zip(numpy.arange(nalgosN),algosN): + meanmatrix[algo[1]] = numpy.zeros((rhos.size, deltas.size)) + for i,algo in zip(numpy.arange(nalgosL),algosL): + meanmatrix[algo[1]] = numpy.zeros((lambdas.size, rhos.size, deltas.size)) + + # PP: start job server + job_server = pp.Server(ncpus = 4) + idx = 0 + jobparams = [] + for idelta,delta in zip(numpy.arange(deltas.size),deltas): + for irho,rho in zip(numpy.arange(rhos.size),rhos): + + # Generate data and operator + Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) + + jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0)) + + idx = idx + 1 + + # Run algorithms + modules = ('numpy','pyCSalgos','pyCSalgos.GAP.GAP','pyCSalgos.SL0.SL0_approx') + depfuncs = () + jobs = [job_server.submit(runonce, jobparam, (run_gap,run_sl0), modules, depfuncs) for jobparam in jobparams] + #funcarray[idelta,irho] = job_server.submit(runonce,(algosN,algosL, Omega,y,lambdas,realnoise,M,x0), (run_gap,run_sl0)) + #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) + + # Get data from jobs + idx = 0 + for idelta,delta in zip(numpy.arange(deltas.size),deltas): + for irho,rho in zip(numpy.arange(rhos.size),rhos): + print "***** delta = ",delta," rho = ",rho + mrelerrN,mrelerrL = jobs[idx]() + for algotuple in algosN: + 
meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] + if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): + meanmatrix[algotuple[1]][irho,idelta] = 0 + for algotuple in algosL: + for ilbd in numpy.arange(lambdas.size): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] + if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 + idx = idx + 1 + + # # Prepare matrices to show + # showmats = dict() + # for i,algo in zip(numpy.arange(nalgosN),algosN): + # showmats[algo[1]] = numpy.zeros(rhos.size, deltas.size) + # for i,algo in zip(numpy.arange(nalgosL),algosL): + # showmats[algo[1]] = numpy.zeros(lambdas.size, rhos.size, deltas.size) + + # Save + tosave = dict() + tosave['meanmatrix'] = meanmatrix + tosave['d'] = d + tosave['sigma'] = sigma + tosave['deltas'] = deltas + tosave['rhos'] = rhos + tosave['numvects'] = numvects + tosave['SNRdb'] = SNRdb + tosave['lambdas'] = lambdas + try: + scipy.io.savemat('ABSapprox.mat',tosave) + except TypeError: + print "Oops, Type Error" + raise + # Show + # for algotuple in algosN: + # plt.figure() + # plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest') + # for algotuple in algosL: + # for ilbd in numpy.arange(lambdas.size): + # plt.figure() + # plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest') + # plt.show() + print "Finished." + +def genData(d,sigma,delta,rho,numvects,SNRdb): + + # Process parameters + noiselevel = 1.0 / (10.0**(SNRdb/10.0)); + p = round(sigma*d); + m = round(delta*d); + l = round(d - rho*m); + + # Generate Omega and data based on parameters + Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p); + # Optionally make Omega more coherent + U,S,Vt = numpy.linalg.svd(Omega); + Sdnew = S * (1+numpy.arange(S.size)) # Make D coherent, not Omega! 
+ Snew = numpy.vstack((numpy.diag(Sdnew), numpy.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) + Omega = numpy.dot(U , numpy.dot(Snew,Vt)) + + # Generate data + x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); + + return Omega,x0,y,M,realnoise + +def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): + + d = Omega.shape[1] + + nalgosN = len(algosN) + nalgosL = len(algosL) + + xrec = dict() + err = dict() + relerr = dict() + + # Prepare storage variables for algorithms non-Lambda + for i,algo in zip(numpy.arange(nalgosN),algosN): + xrec[algo[1]] = numpy.zeros((d, y.shape[1])) + err[algo[1]] = numpy.zeros(y.shape[1]) + relerr[algo[1]] = numpy.zeros(y.shape[1]) + # Prepare storage variables for algorithms with Lambda + for i,algo in zip(numpy.arange(nalgosL),algosL): + xrec[algo[1]] = numpy.zeros((lambdas.size, d, y.shape[1])) + err[algo[1]] = numpy.zeros((lambdas.size, y.shape[1])) + relerr[algo[1]] = numpy.zeros((lambdas.size, y.shape[1])) + + # Run algorithms non-Lambda + for iy in numpy.arange(y.shape[1]): + for algofunc,strname in algosN: + epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy]) + xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) + err[strname][iy] = numpy.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) + relerr[strname][iy] = err[strname][iy] / numpy.linalg.norm(x0[:,iy]) + for algotuple in algosN: + print algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname]) + + # Run algorithms with Lambda + for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas): + for iy in numpy.arange(y.shape[1]): + D = numpy.linalg.pinv(Omega) + U,S,Vt = numpy.linalg.svd(D) + for algofunc,strname in algosL: + epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy]) + gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) + xrec[strname][ilbd,:,iy] = numpy.dot(D,gamma) + err[strname][ilbd,iy] = numpy.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) + relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / numpy.linalg.norm(x0[:,iy]) + print 'Lambda = ',lbd,' :' + for algotuple in algosL: + print ' ',algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname][ilbd,:]) + + # Prepare results + mrelerrN = dict() + for algotuple in algosN: + mrelerrN[algotuple[1]] = numpy.mean(relerr[algotuple[1]]) + mrelerrL = dict() + for algotuple in algosL: + mrelerrL[algotuple[1]] = numpy.mean(relerr[algotuple[1]],1) + + return mrelerrN,mrelerrL + +# Script main +if __name__ == "__main__": + mainrun() \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/ABSapproxSingle.py Wed Dec 14 14:37:20 2011 +0000 @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat Nov 05 18:08:40 2011 + +@author: Nic +""" + +import numpy as np +import scipy.io +import math +doplot = True +try: + import matplotlib.pyplot as plt +except: + doplot = False +if doplot: + import matplotlib.cm as cm +import pyCSalgos +import pyCSalgos.GAP.GAP +import pyCSalgos.SL0.SL0_approx + +# Define functions that prepare arguments for each algorithm call +def run_gap(y,M,Omega,epsilon): + gapparams = {"num_iteration" : 1000,\ + "greedy_level" : 0.9,\ + "stopping_coefficient_size" : 1e-4,\ + "l2solver" : 'pseudoinverse',\ + "noise_level": epsilon} + return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0] + +def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd): + + N,n = Omega.shape + #D = np.linalg.pinv(Omega) + #U,S,Vt = np.linalg.svd(D) + aggDupper = np.dot(M,D) + aggDlower = Vt[-(N-n):,:] + aggD = np.concatenate((aggDupper, lbd * aggDlower)) + aggy = np.concatenate((y, np.zeros(N-n))) + + sigmamin = 0.001 + sigma_decrease_factor = 0.5 + mu_0 = 2 + L = 10 + return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) + +def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd): + + N,n = Omega.shape + #D = np.linalg.pinv(Omega) + #U,S,Vt = np.linalg.svd(D) + aggDupper = np.dot(M,D) + aggDlower = Vt[-(N-n):,:] + aggD = np.concatenate((aggDupper, lbd * aggDlower)) + aggy = np.concatenate((y, np.zeros(N-n))) + + sigmamin = 0.001 + sigma_decrease_factor = 0.5 + mu_0 = 2 + L = 10 + return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L) + + +# Define tuples (algorithm setup function, algorithm function, name) +gap = (run_gap, 'GAP') +sl0 = (run_sl0, 'SL0_approx') + +# Define which algorithms to run +# 1. Algorithms not depending on lambda +algosN = gap, # tuple +# 2. 
Algorithms depending on lambda (our ABS approach) +algosL = sl0, # tuple + +def mainrun(): + + nalgosN = len(algosN) + nalgosL = len(algosL) + + #Set up experiment parameters + d = 50.0; + sigma = 2.0 + #deltas = np.arange(0.05,1.,0.05) + #rhos = np.arange(0.05,1.,0.05) + deltas = np.array([0.05, 0.45, 0.95]) + rhos = np.array([0.05, 0.45, 0.95]) + #deltas = np.array([0.05]) + #rhos = np.array([0.05]) + #delta = 0.8; + #rho = 0.15; + numvects = 100; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10))) + lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000]) + + meanmatrix = dict() + for i,algo in zip(np.arange(nalgosN),algosN): + meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size)) + for i,algo in zip(np.arange(nalgosL),algosL): + meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size)) + + for idelta,delta in zip(np.arange(deltas.size),deltas): + for irho,rho in zip(np.arange(rhos.size),rhos): + + # Generate data and operator + Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb) + + # Run algorithms + print "***** delta = ",delta," rho = ",rho + mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0) + + for algotuple in algosN: + meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]] + if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]): + meanmatrix[algotuple[1]][irho,idelta] = 0 + for algotuple in algosL: + for ilbd in np.arange(lambdas.size): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd] + if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]): + meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0 + + # # Prepare matrices to show + # showmats = dict() + # for i,algo in zip(np.arange(nalgosN),algosN): + # showmats[algo[1]] = np.zeros(rhos.size, deltas.size) + # for i,algo in zip(np.arange(nalgosL),algosL): + # showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size) + + # Save + tosave = dict() + tosave['meanmatrix'] = meanmatrix + tosave['d'] = d + tosave['sigma'] = sigma + tosave['deltas'] = deltas + tosave['rhos'] = rhos + tosave['numvects'] = numvects + tosave['SNRdb'] = SNRdb + tosave['lambdas'] = lambdas + try: + scipy.io.savemat('ABSapprox.mat',tosave) + except TypeError: + print "Oops, Type Error" + raise + # Show + if doplot: + for algotuple in algosN: + plt.figure() + plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower') + for algotuple in algosL: + for ilbd in np.arange(lambdas.size): + plt.figure() + plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower') + plt.show() + print "Finished." + +def genData(d,sigma,delta,rho,numvects,SNRdb): + + # Process parameters + noiselevel = 1.0 / (10.0**(SNRdb/10.0)); + p = round(sigma*d); + m = round(delta*d); + l = round(d - rho*m); + + # Generate Omega and data based on parameters + Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p); + # Optionally make Omega more coherent + U,S,Vt = np.linalg.svd(Omega); + Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega! 
+ Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1])))) + Omega = np.dot(U , np.dot(Snew,Vt)) + + # Generate data + x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0'); + + return Omega,x0,y,M,realnoise + +def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0): + + d = Omega.shape[1] + + nalgosN = len(algosN) + nalgosL = len(algosL) + + xrec = dict() + err = dict() + relerr = dict() + + # Prepare storage variables for algorithms non-Lambda + for i,algo in zip(np.arange(nalgosN),algosN): + xrec[algo[1]] = np.zeros((d, y.shape[1])) + err[algo[1]] = np.zeros(y.shape[1]) + relerr[algo[1]] = np.zeros(y.shape[1]) + # Prepare storage variables for algorithms with Lambda + for i,algo in zip(np.arange(nalgosL),algosL): + xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1])) + err[algo[1]] = np.zeros((lambdas.size, y.shape[1])) + relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1])) + + # Run algorithms non-Lambda + for iy in np.arange(y.shape[1]): + for algofunc,strname in algosN: + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon) + err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy]) + relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy]) + for algotuple in algosN: + print algotuple[1],' : avg relative error = ',np.mean(relerr[strname]) + + # Run algorithms with Lambda + for ilbd,lbd in zip(np.arange(lambdas.size),lambdas): + for iy in np.arange(y.shape[1]): + D = np.linalg.pinv(Omega) + U,S,Vt = np.linalg.svd(D) + for algofunc,strname in algosL: + epsilon = 1.1 * np.linalg.norm(realnoise[:,iy]) + gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd) + xrec[strname][ilbd,:,iy] = np.dot(D,gamma) + err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy]) + relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy]) + print 'Lambda = ',lbd,' :' + for algotuple in algosL: + print ' ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:]) + + # Prepare results + mrelerrN = dict() + for algotuple in algosN: + mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]]) + mrelerrL = dict() + for algotuple in algosL: + mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1) + + return mrelerrN,mrelerrL + +# Script main +if __name__ == "__main__": + + #import cProfile + #cProfile.run('mainrun()', 'profile') + mainrun() \ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/algos.py	Wed Dec 14 14:37:20 2011 +0000
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Dec 07 14:06:13 2011
+
+@author: ncleju
+"""
+
+import numpy
+import pyCSalgos
+import pyCSalgos.GAP.GAP
+import pyCSalgos.BP.l1qc
+import pyCSalgos.BP.l1qec
+import pyCSalgos.SL0.SL0_approx
+import pyCSalgos.OMP.omp_QR
+import pyCSalgos.RecomTST.RecommendedTST
+import pyCSalgos.NESTA.NESTA
+
+#==========================
+# Algorithm functions
+#==========================
+def run_gap(y,M,Omega,epsilon):
+    gapparams = {"num_iteration" : 1000,\
+                 "greedy_level" : 0.9,\
+                 "stopping_coefficient_size" : 1e-4,\
+                 "l2solver" : 'pseudoinverse',\
+                 "noise_level": epsilon}
+    return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,numpy.zeros(Omega.shape[1]))[0]
+
+def run_bp_analysis(y,M,Omega,epsilon):
+
+    N,n = Omega.shape
+    D = numpy.linalg.pinv(Omega)
+    U,S,Vt = numpy.linalg.svd(D)
+    Aeps = numpy.dot(M,D)
+    Aexact = Vt[-(N-n):,:]
+    # We don't need any aggregate matrices anymore
+
+    x0 = numpy.zeros(N)
+    return numpy.dot(D , pyCSalgos.BP.l1qec.l1qec_logbarrier(x0,Aeps,Aeps.T,y,epsilon,Aexact,Aexact.T,numpy.zeros(N-n)))
+
+def run_sl0_analysis(y,M,Omega,epsilon):
+
+    N,n = Omega.shape
+    D = numpy.linalg.pinv(Omega)
+    U,S,Vt = numpy.linalg.svd(D)
+    Aeps = numpy.dot(M,D)
+    Aexact = Vt[-(N-n):,:]
+    # We don't need any aggregate matrices anymore
+
+    sigmamin = 0.001
+    sigma_decrease_factor = 0.5
+    mu_0 = 2
+    L = 10
+    return numpy.dot(D , pyCSalgos.SL0.SL0_approx.SL0_approx_analysis(Aeps,Aexact,y,epsilon,sigmamin,sigma_decrease_factor,mu_0,L))
+
+def run_nesta(y,M,Omega,epsilon):
+
+    U,S,V = numpy.linalg.svd(M, full_matrices = True)
+    V = V.T         # Make like Matlab
+    m,n = M.shape   # Make like Matlab
+    S = numpy.hstack((numpy.diag(S), numpy.zeros((m,n-m))))
+
+    opt_muf = 1e-3
+    optsUSV = {'U':U, 'S':S, 'V':V}
+    opts = {'U':Omega, 'Ut':Omega.T.copy(), 'USV':optsUSV, 'TolVar':1e-5, 'Verbose':0}
+    return pyCSalgos.NESTA.NESTA.NESTA(M, None, y, opt_muf, epsilon, opts)[0]
+
+
+def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+
+    N,n = Omega.shape
+    #D = numpy.linalg.pinv(Omega)
+    #U,S,Vt = numpy.linalg.svd(D)
+    aggDupper = numpy.dot(M,D)
+    aggDlower = Vt[-(N-n):,:]
+    aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+    aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+
+    sigmamin = 0.001
+    sigma_decrease_factor = 0.5
+    mu_0 = 2
+    L = 10
+    return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+
+    N,n = Omega.shape
+    #D = numpy.linalg.pinv(Omega)
+    #U,S,Vt = numpy.linalg.svd(D)
+    aggDupper = numpy.dot(M,D)
+    aggDlower = Vt[-(N-n):,:]
+    aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+    aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+
+    x0 = numpy.zeros(N)
+    return pyCSalgos.BP.l1qc.l1qc_logbarrier(x0,aggD,aggD.T,aggy,epsilon)
+
+def run_ompeps(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+
+    N,n = Omega.shape
+    #D = numpy.linalg.pinv(Omega)
+    #U,S,Vt = numpy.linalg.svd(D)
+    aggDupper = numpy.dot(M,D)
+    aggDlower = Vt[-(N-n):,:]
+    aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+    aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+
+    opts = dict()
+    opts['stopCrit'] = 'mse'
+    opts['stopTol'] = epsilon**2 / aggy.size
+    return pyCSalgos.OMP.omp_QR.greed_omp_qr(aggy,aggD,aggD.shape[1],opts)[0]
+
+def run_tst(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+
+    N,n = Omega.shape
+    #D = numpy.linalg.pinv(Omega)
+    #U,S,Vt = numpy.linalg.svd(D)
+    aggDupper = numpy.dot(M,D)
+    aggDlower = Vt[-(N-n):,:]
+    aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+    aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+
+    nsweep = 300
+    tol = epsilon / numpy.linalg.norm(aggy)
+    return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep=nsweep, tol=tol)
+
+
+#==========================
+# Define tuples (algorithm function, name)
+#==========================
+gap = (run_gap, 'GAP')
+sl0 = (run_sl0, 'SL0a')
+sl0analysis = (run_sl0_analysis, 'SL0a2')
+bpanalysis = (run_bp_analysis, 'BPa2')
+nesta = (run_nesta, 'NESTA')
+bp = (run_bp, 'BP')
+ompeps = (run_ompeps, 'OMPeps')
+tst = (run_tst, 'TST')
\ No newline at end of file
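The (function, name) tuples defined at the bottom of algos.py are what the presets in stdparams.py (next file) assemble into the two algorithm groups. A minimal illustration of that wiring, with an arbitrary selection of algorithms rather than any particular preset:

```python
# Sketch of how stdparams.py groups the (function, name) tuples from algos.py.
from algos import gap, nesta, sl0, bp

algosN = (gap, nesta)   # algorithms that do not depend on lambda
algosL = (sl0, bp)      # ABS-approach algorithms, swept over the lambdas array
```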
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/stdparams.py Wed Dec 14 14:37:20 2011 +0000 @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed Dec 07 14:04:40 2011 + +@author: ncleju +""" + +import numpy +from algos import * + +#========================== +# Standard parameters +#========================== +# Standard parameters for quick testing +# Algorithms: GAP, SL0 and BP +# d=50, sigma = 2, delta and rho only 3 x 3, lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# Useful for short testing +def stdtest(): + # Define which algorithms to run + algosN = nesta, # tuple of algorithms not depending on lambda + #algosL = sl0,bp # tuple of algorithms depending on lambda (our ABS approach) + algosL = sl0, + + d = 50.0 + sigma = 2.0 + deltas = numpy.array([0.05, 0.45, 0.95]) + rhos = numpy.array([0.05, 0.45, 0.95]) + #deltas = numpy.array([0.95]) + #deltas = numpy.arange(0.05,1.,0.05) + #rhos = numpy.array([0.05]) + numvects = 10; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_stdtest.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_stdtest_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + + +# Standard parameters 1 +# All algorithms, 100 vectors +# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +def std1(): + # Define which algorithms to run + algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda + algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) + + d = 50.0; + sigma = 2.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std1.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std1_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + + +# Standard parameters 2 +# All algorithms, 100 vectors +# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +def std2(): + # Define which algorithms to run + algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda + algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) + + d = 20.0 + sigma = 10.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std2.mat' + doshowplot = False + dosaveplot = True + saveplotbase 
= 'approx_pt_std2_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + + + # Standard parameters 3 +# All algorithms, 100 vectors +# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# IDENTICAL with 1 but with 10dB SNR noise +def std3(): + # Define which algorithms to run + algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda + algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) + + d = 50.0; + sigma = 2.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std3.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std3_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + +# Standard parameters 4 +# All algorithms, 100 vectors +# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# Identical to 2 but with 10dB SNR noise +def std4(): + # Define which algorithms to run + algosN = gap,sl0analysis,bpanalysis,nesta # tuple of algorithms not depending on lambda + algosL = sl0,bp,ompeps,tst # tuple of algorithms depending on lambda (our ABS approach) + + d = 20.0 + sigma = 10.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std4.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std4_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + +# Standard parameters 1nesta +# Only NESTA, 100 vectors +# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# Identical to std1 but with only NESTA +def std1nesta(): + # Define which algorithms to run + algosN = nesta, # tuple of algorithms not depending on lambda + algosL = () # tuple of algorithms depending on lambda (our ABS approach) + + d = 50.0; + sigma = 2.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std1nesta.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std1nesta_' + saveplotexts = ('png','pdf','eps') + + return 
algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + +# Standard parameters 2nesta +# Only NESTA, 100 vectors +# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# Identical with std2, but with only NESTA +def std2nesta(): + # Define which algorithms to run + algosN = nesta, # tuple of algorithms not depending on lambda + algosL = () # tuple of algorithms depending on lambda (our ABS approach) + + d = 20.0 + sigma = 10.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 20.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std2nesta.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std2nesta_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + + # Standard parameters 3nesta +# Only NESTA, 100 vectors +# d=50, sigma = 2, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# IDENTICAL with 3 but with only NESTA +def std3nesta(): + # Define which algorithms to run + algosN = nesta, # tuple of algorithms not depending on lambda + algosL = () # tuple of algorithms depending on lambda (our ABS approach) + + d = 50.0; + sigma = 2.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std3nesta.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std3nesta_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts + +# Standard parameters 4nesta +# Only NESTA, 100 vectors +# d=20, sigma = 10, delta and rho full resolution (0.05 step), lambdas = 0, 1e-4, 1e-2, 1, 100, 10000 +# Do save data, do save plots, don't show plots +# Identical to 4 but with only NESTA +def std4nesta(): + # Define which algorithms to run + algosN = nesta, # tuple of algorithms not depending on lambda + algosL = () # tuple of algorithms depending on lambda (our ABS approach) + + d = 20.0 + sigma = 10.0 + deltas = numpy.arange(0.05,1.,0.05) + rhos = numpy.arange(0.05,1.,0.05) + numvects = 100; # Number of vectors to generate + SNRdb = 10.; # This is norm(signal)/norm(noise), so power, not energy + # Values for lambda + #lambdas = [0 10.^linspace(-5, 4, 10)]; + lambdas = numpy.array([0., 0.0001, 0.01, 1, 100, 10000]) + + dosavedata = True + savedataname = 'approx_pt_std4nesta.mat' + doshowplot = False + dosaveplot = True + saveplotbase = 'approx_pt_std4nesta_' + saveplotexts = ('png','pdf','eps') + + return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\ + doshowplot,dosaveplot,saveplotbase,saveplotexts \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/utils.py Wed Dec 14 14:37:20 2011 +0000 @@ -0,0 +1,229 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed Nov 09 12:28:55 2011 + +@author: ncleju +""" + +import numpy +import scipy.io +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import matplotlib.colors as mcolors + +# Sample call +#utils.loadshowmatrices_multipixels('H:\\CS\\Python\\Results\\pt_std1\\approx_pt_std1.mat', dosave=True, saveplotbase='approx_pt_std1_',saveplotexts=('png','eps','pdf')) + +#def loadshowmatrices(filename, algonames = None): +# mdict = scipy.io.loadmat(filename) +# if algonames == None: +# algonames = mdict['algonames'] +# +# for algonameobj in algonames: +# algoname = algonameobj[0][0] +# print algoname +# if mdict['meanmatrix'][algoname][0,0].ndim == 2: +# plt.figure() +# plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower') +# elif mdict['meanmatrix'][algoname][0,0].ndim == 3: +# for matrix in mdict['meanmatrix'][algoname][0,0]: +# plt.figure() +# plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower') +# plt.show() +# print "Finished." +# +# +#def loadsavematrices(filename, saveplotbase, saveplotexts, algonames = None): +# +# mdict = scipy.io.loadmat(filename) +# lambdas = mdict['lambdas'] +# +# if algonames is None: +# algonames = mdict['algonames'] +# +# for algonameobj in algonames: +# algoname = algonameobj[0][0] +# print algoname +# if mdict['meanmatrix'][algoname][0,0].ndim == 2: +# plt.figure() +# plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower') +# for ext in saveplotexts: +# plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') +# elif mdict['meanmatrix'][algoname][0,0].ndim == 3: +# ilbd = 0 +# for matrix in mdict['meanmatrix'][algoname][0,0]: +# plt.figure() +# plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower') +# for ext in saveplotexts: +# plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight') +# ilbd = ilbd + 1 +# print "Finished." + +def loadmatrices(filename, algonames=None, doshow=True, dosave=False, saveplotbase=None, saveplotexts=None): + + if dosave and (saveplotbase is None or saveplotexts is None): + print('Error: please specify name and extensions for saving') + raise Exception('Name or extensions for saving not specified') + + mdict = scipy.io.loadmat(filename) + + if dosave: + lambdas = mdict['lambdas'] + + if algonames is None: + algonames = mdict['algonames'] + + for algonameobj in algonames: + algoname = algonameobj[0][0] + print algoname + if mdict['meanmatrix'][algoname][0,0].ndim == 2: + plt.figure() + plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest',origin='lower') + if dosave: + for ext in saveplotexts: + plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') + elif mdict['meanmatrix'][algoname][0,0].ndim == 3: + if dosave: + ilbd = 0 + for matrix in mdict['meanmatrix'][algoname][0,0]: + plt.figure() + plt.imshow(matrix, cmap=cm.gray, norm=mcolors.Normalize(0,1), interpolation='nearest',origin='lower') + if dosave: + for ext in saveplotexts: + plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' + ext, bbox_inches='tight') + ilbd = ilbd + 1 + + if doshow: + plt.show() + print "Finished." 
+ + +def loadshowmatrices_multipixels(filename, algonames = None, doshow=True, dosave=False, saveplotbase=None, saveplotexts=None): + + if dosave and (saveplotbase is None or saveplotexts is None): + print('Error: please specify name and extensions for saving') + raise Exception('Name or extensions for saving not specified') + + mdict = scipy.io.loadmat(filename) + + if dosave: + lambdas = mdict['lambdas'] + + if algonames == None: + algonames = mdict['algonames'] + +# thresh = 0.90 + N = 10 +# border = 2 +# bordercolor = 0 # black + + for algonameobj in algonames: + algoname = algonameobj[0][0] + print algoname + if mdict['meanmatrix'][algoname][0,0].ndim == 2: + + # Prepare bigger matrix + rows,cols = mdict['meanmatrix'][algoname][0,0].shape + bigmatrix = numpy.zeros((N*rows,N*cols)) + for i in numpy.arange(rows): + for j in numpy.arange(cols): + bigmatrix[i*N:i*N+N,j*N:j*N+N] = mdict['meanmatrix'][algoname][0,0][i,j] + bigmatrix = int_drawseparation(mdict['meanmatrix'][algoname][0,0],bigmatrix,10,0.9,2,0) + bigmatrix = int_drawseparation(mdict['meanmatrix'][algoname][0,0],bigmatrix,10,0.8,2,0.2) + bigmatrix = int_drawseparation(mdict['meanmatrix'][algoname][0,0],bigmatrix,10,0.5,2,1) +# # Mark 95% border +# if mdict['meanmatrix'][algoname][0,0][i,j] > thresh: +# # Top border +# if mdict['meanmatrix'][algoname][0,0][i-1,j] < thresh and i>0: +# bigmatrix[i*N:i*N+border,j*N:j*N+N] = bordercolor +# # Bottom border +# if mdict['meanmatrix'][algoname][0,0][i+1,j] < thresh and i<rows-1: +# bigmatrix[i*N+N-border:i*N+N,j*N:j*N+N] = bordercolor +# # Left border +# if mdict['meanmatrix'][algoname][0,0][i,j-1] < thresh and j>0: +# bigmatrix[i*N:i*N+N,j*N:j*N+border] = bordercolor +# # Right border (not very probable) +# if j<cols-1 and mdict['meanmatrix'][algoname][0,0][i,j+1] < thresh: +# bigmatrix[i*N:i*N+N,j*N+N-border:j*N+N] = bordercolor + + plt.figure() + #plt.imshow(mdict['meanmatrix'][algoname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower') + plt.imshow(bigmatrix, cmap=cm.gray, interpolation='nearest',origin='lower') + if dosave: + for ext in saveplotexts: + plt.savefig(saveplotbase + algoname + '.' + ext, bbox_inches='tight') + elif mdict['meanmatrix'][algoname][0,0].ndim == 3: + if dosave: + ilbd = 0 + + for matrix in mdict['meanmatrix'][algoname][0,0]: + + # Prepare bigger matrix + rows,cols = matrix.shape + bigmatrix = numpy.zeros((N*rows,N*cols)) + for i in numpy.arange(rows): + for j in numpy.arange(cols): + bigmatrix[i*N:i*N+N,j*N:j*N+N] = matrix[i,j] + bigmatrix = int_drawseparation(matrix,bigmatrix,10,0.9,2,0) + bigmatrix = int_drawseparation(matrix,bigmatrix,10,0.8,2,0.2) + bigmatrix = int_drawseparation(matrix,bigmatrix,10,0.5,2,1) +# # Mark 95% border +# if matrix[i,j] > thresh: +# # Top border +# if matrix[i-1,j] < thresh and i>0: +# bigmatrix[i*N:i*N+border,j*N:j*N+N] = bordercolor +# # Bottom border +# if matrix[i+1,j] < thresh and i<rows-1: +# bigmatrix[i*N+N-border:i*N+N,j*N:j*N+N] = bordercolor +# # Left border +# if matrix[i,j-1] < thresh and j>0: +# bigmatrix[i*N:i*N+N,j*N:j*N+border] = bordercolor +# # Right border (not very probable) +# if j<cols-1 and matrix[i,j+1] < thresh: +# bigmatrix[i*N:i*N+N,j*N+N-border:j*N+N] = bordercolor + + plt.figure() + #plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower') + plt.imshow(bigmatrix, cmap=cm.gray, interpolation='nearest',origin='lower') + if dosave: + for ext in saveplotexts: + plt.savefig(saveplotbase + algoname + ('_lbd%.0e' % lambdas[ilbd]) + '.' 
+ ext, bbox_inches='tight') + ilbd = ilbd + 1 + if doshow: + plt.show() + print "Finished." + +def appendtomatfile(filename, toappend, toappendname): + mdict = scipy.io.loadmat(filename) + mdict[toappendname] = toappend + try: + scipy.io.savemat(filename, mdict) + except: + print "Save error" + + # To save to a cell array, create an object array: + # >>> obj_arr = np.zeros((2,), dtype=np.object) + # >>> obj_arr[0] = 1 + # >>> obj_arr[1] = 'a string' + +def int_drawseparation(matrix,bigmatrix,N,thresh,border,bordercolor): + rows,cols = matrix.shape + for i in numpy.arange(rows): + for j in numpy.arange(cols): + # Mark border + # Use top-left corner of current square for reference + if matrix[i,j] > thresh: + # Top border + if matrix[i-1,j] < thresh and i>0: + bigmatrix[i*N:i*N+border,j*N:j*N+N] = bordercolor + # Bottom border + if i<rows-1 and matrix[i+1,j] < thresh: + bigmatrix[i*N+N-border:i*N+N,j*N:j*N+N] = bordercolor + # Left border + if matrix[i,j-1] < thresh and j>0: + bigmatrix[i*N:i*N+N,j*N:j*N+border] = bordercolor + # Right border (not very probable) + if j<cols-1 and matrix[i,j+1] < thresh: + bigmatrix[i*N:i*N+N,j*N+N-border:j*N+N] = bordercolor + + return bigmatrix \ No newline at end of file