diff scripts/ABSapprox.py @ 22:2dd78e37b23a
ABS approx script is working
Started working on parallel
author   | nikcleju
date     | Wed, 09 Nov 2011 00:11:14 +0000
parents  | 45255b0a6dba
children | c02eb33d2c54
--- a/scripts/ABSapprox.py	Tue Nov 08 14:45:35 2011 +0000
+++ b/scripts/ABSapprox.py	Wed Nov 09 00:11:14 2011 +0000
@@ -6,61 +6,133 @@
 """
 
 import numpy as np
+import scipy.io
+import math
+import matplotlib.pyplot as plt
+import matplotlib.cm as cm
 import pyCSalgos
 import pyCSalgos.GAP.GAP
 import pyCSalgos.SL0.SL0_approx
 
 # Define functions that prepare arguments for each algorithm call
-def gap_paramsetup(y,M,Omega,epsilon,lbd):
+def run_gap(y,M,Omega,epsilon):
     gapparams = {"num_iteration" : 1000,\
                  "greedy_level" : 0.9,\
                  "stopping_coefficient_size" : 1e-4,\
                  "l2solver" : 'pseudoinverse',\
                  "noise_level": epsilon}
-    return y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1])
-def sl0_paramsetup(y,M,Omega,epsilon,lbd):
+    return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
+
+def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
     N,n = Omega.shape
-    D = np.linalg.pinv(Omega)
-    U,S,Vt = np.linalg.svd(D)
+    #D = np.linalg.pinv(Omega)
+    #U,S,Vt = np.linalg.svd(D)
     aggDupper = np.dot(M,D)
     aggDlower = Vt[-(N-n):,:]
     aggD = np.concatenate((aggDupper, lbd * aggDlower))
     aggy = np.concatenate((y, np.zeros(N-n)))
-    sigmamin = 0.01
-    sigma_decrease_factor = 0.8
+    sigmamin = 0.001
+    sigma_decrease_factor = 0.5
     mu_0 = 2
     L = 10
-    return aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L
-
-def post_multiply_with_D(D,gamma):
-    return np.dot(D,gamma)
-def post_do_nothing(D,gamma):
-    return gamma
+    return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
 
 # Define tuples (algorithm setup function, algorithm function, name)
-gap = (gap_paramsetup, pyCSalgos.GAP.GAP.GAP, post_do_nothing, 'GAP')
-sl0 = (sl0_paramsetup, pyCSalgos.SL0.SL0_approx.SL0_approx, post_multiply_with_D, 'SL0_approx')
-#sl0 = (sl0_paramsetup, lambda x: np.dot(x[0],x[1]()), 'SL0_approx')
+gap = (run_gap, 'GAP')
+sl0 = (run_sl0, 'SL0_approx')
 
-# Main function
+# Define which algorithms to run
+# 1. Algorithms not depending on lambda
+algosN = gap,   # tuple
+# 2. Algorithms depending on lambda (our ABS approach)
+algosL = sl0,   # tuple
+
 def mainrun():
-
-    # Define which algorithms to run
-    algos = (gap, sl0)
-    numalgos = len(algos)
-    # Set up experiment parameters
-    sigma = 2.0;
-    delta = 0.8;
-    rho = 0.15;
+    nalgosN = len(algosN)
+    nalgosL = len(algosL)
+
+    #Set up experiment parameters
+    d = 50;
+    sigma = 2.0
+    #deltas = np.arange(0.05,0.95,0.05)
+    #rhos = np.arange(0.05,0.95,0.05)
+    deltas = np.array([0.05,0.95])
+    rhos = np.array([0.05,0.95])
+    #deltas = np.array([0.05])
+    #rhos = np.array([0.05])
+    #delta = 0.8;
+    #rho = 0.15;
     numvects = 10; # Number of vectors to generate
     SNRdb = 20.;   # This is norm(signal)/norm(noise), so power, not energy
+    # Values for lambda
+    #lambdas = [0 10.^linspace(-5, 4, 10)];
+    lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
+
+    meanmatrix = dict()
+    for i,algo in zip(np.arange(nalgosN),algosN):
+        meanmatrix[algo[1]] = np.zeros((rhos.size, deltas.size))
+    for i,algo in zip(np.arange(nalgosL),algosL):
+        meanmatrix[algo[1]] = np.zeros((lambdas.size, rhos.size, deltas.size))
+
+    for idelta,delta in zip(np.arange(deltas.size),deltas):
+        for irho,rho in zip(np.arange(rhos.size),rhos):
+
+            # Generate data and operator
+            Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
+
+            # Run algorithms
+            mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
+
+            for algotuple in algosN:
+                meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
+                if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
+                    meanmatrix[algotuple[1]][irho,idelta] = 0
+            for algotuple in algosL:
+                for ilbd in np.arange(lambdas.size):
+                    meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
+                    if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
+                        meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
+
+#    # Prepare matrices to show
+#    showmats = dict()
+#    for i,algo in zip(np.arange(nalgosN),algosN):
+#        showmats[algo[1]] = np.zeros(rhos.size, deltas.size)
+#    for i,algo in zip(np.arange(nalgosL),algosL):
+#        showmats[algo[1]] = np.zeros(lambdas.size, rhos.size, deltas.size)
+
+    # Save
+    tosave = dict()
+    tosave['meanmatrix'] = meanmatrix
+    tosave['d'] = d
+    tosave['sigma'] = sigma
+    tosave['deltas'] = deltas
+    tosave['rhos'] = rhos
+    tosave['numvects'] = numvects
+    tosave['SNRdb'] = SNRdb
+    tosave['lambdas'] = lambdas
+    try:
+        scipy.io.savemat('ABSapprox.mat',tosave)
+    except TypeError:
+        print "Oops, Type Error"
+        raise
+    # Show
+    for algotuple in algosN:
+        plt.figure()
+        plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest')
+    for algotuple in algosL:
+        for ilbd in np.arange(lambdas.size):
+            plt.figure()
+            plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest')
+    plt.show()
+    print "Finished."
+
+def genData(d,sigma,delta,rho,numvects,SNRdb):
 
     # Process parameters
     noiselevel = 1.0 / (10.0**(SNRdb/10.0));
-    d = 50;
     p = round(sigma*d);
     m = round(delta*d);
     l = round(d - rho*m);
@@ -68,43 +140,73 @@
     # Generate Omega and data based on parameters
     Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
     # Optionally make Omega more coherent
-    #[U, S, Vt] = np.linalg.svd(Omega);
-    #Sdnew = np.diag(S) * (1+np.arange(np.diag(S).size)); % Make D coherent, not Omega!
-    #Snew = [diag(Sdnew); zeros(size(S,1) - size(S,2), size(S,2))];
-    #Omega = U * Snew * V';
+    U,S,Vt = np.linalg.svd(Omega);
+    Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega!
+    Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
+    Omega = np.dot(U , np.dot(Snew,Vt))
 
     # Generate data
     x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
+
+    return Omega,x0,y,M,realnoise
 
-    # Values for lambda
-    #lambdas = [0 10.^linspace(-5, 4, 10)];
-    lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
+def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
+
+    d = Omega.shape[1]
+
+    nalgosN = len(algosN)
+    nalgosL = len(algosL)
 
     xrec = dict()
     err = dict()
     relerr = dict()
-    for i,algo in zip(np.arange(numalgos),algos):
-        xrec[algo[3]] = np.zeros((lambdas.size, d, y.shape[1]))
-        err[algo[3]] = np.zeros((lambdas.size, y.shape[1]))
-        relerr[algo[3]] = np.zeros((lambdas.size, y.shape[1]))
+
+    # Prepare storage variables for algorithms non-Lambda
+    for i,algo in zip(np.arange(nalgosN),algosN):
+        xrec[algo[1]] = np.zeros((d, y.shape[1]))
+        err[algo[1]] = np.zeros(y.shape[1])
+        relerr[algo[1]] = np.zeros(y.shape[1])
+    # Prepare storage variables for algorithms with Lambda
+    for i,algo in zip(np.arange(nalgosL),algosL):
+        xrec[algo[1]] = np.zeros((lambdas.size, d, y.shape[1]))
+        err[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
+        relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
 
+    # Run algorithms non-Lambda
+    for iy in np.arange(y.shape[1]):
+        for algofunc,strname in algosN:
+            epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+            xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
+            err[strname][iy] = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
+            relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
+    for algotuple in algosN:
+        print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])
+
+    # Run algorithms with Lambda
     for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
         for iy in np.arange(y.shape[1]):
-            for algosetupfunc,algofunc,algopostfunc,strname in algos:
+            D = np.linalg.pinv(Omega)
+            U,S,Vt = np.linalg.svd(D)
+            for algofunc,strname in algosL:
                 epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-
-                inparams = algosetupfunc(y[:,iy],M,Omega,epsilon,lbd)
-                xrec[strname][ilbd,:,iy] = algopostfunc(algofunc(*inparams)[0])
-
+                gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
+                xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
                 err[strname][ilbd,iy] = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
                 relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
-
        print 'Lambda = ',lbd,' :'
-        for strname in relerr:
-            print '  ',strname,' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
 
-
-
+        for algotuple in algosL:
+            print '  ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
+
+    # Prepare results
+    mrelerrN = dict()
+    for algotuple in algosN:
+        mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
+    mrelerrL = dict()
+    for algotuple in algosL:
+        mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1)
+
+    return mrelerrN,mrelerrL
+
 # Script main
 if __name__ == "__main__":
     mainrun()
\ No newline at end of file
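
For reference, the following is a minimal, self-contained sketch (not part of the changeset) of the augmentation that run_sl0 performs in the diff above: build D = pinv(Omega), take the SVD of D, stack M*D on top of the lambda-weighted null-space rows of D, append zeros to y, solve for the synthesis coefficients gamma, and map back with x = D*gamma. In this sketch np.linalg.lstsq stands in for pyCSalgos.SL0.SL0_approx, random Gaussian Omega and M replace the pyCSalgos generators, the helper names abs_augment/abs_recover and the parameter values are made up for illustration, so the output only demonstrates the data flow, not recovery quality.

# Sketch of the ABS-style augmentation used by run_sl0 (assumptions noted above).
import numpy as np

def abs_augment(y, M, Omega, lbd):
    # Omega is the N x n analysis operator (N >= n); D = pinv(Omega) acts as a synthesis dictionary.
    N, n = Omega.shape
    D = np.linalg.pinv(Omega)
    # The last N-n rows of Vt span the null space of D.
    U, S, Vt = np.linalg.svd(D)
    aggD = np.concatenate((np.dot(M, D), lbd * Vt[-(N - n):, :]))
    aggy = np.concatenate((y, np.zeros(N - n)))
    return D, aggD, aggy

def abs_recover(y, M, Omega, lbd):
    D, aggD, aggy = abs_augment(y, M, Omega, lbd)
    # Placeholder solver: minimum-norm least squares instead of the smoothed-l0 solver.
    gamma = np.linalg.lstsq(aggD, aggy, rcond=None)[0]
    # Map the synthesis coefficients back to the signal domain, as the script does with np.dot(D, gamma).
    return np.dot(D, gamma)

if __name__ == "__main__":
    rng = np.random.RandomState(0)
    d, p, m = 50, 100, 40          # signal size, analysis rows, measurements (arbitrary values)
    Omega = rng.randn(p, d)        # stand-in for Generate_Analysis_Operator
    M = rng.randn(m, d)            # stand-in for the measurement matrix from Generate_Data_Known_Omega
    x0 = rng.randn(d)
    y = np.dot(M, x0)              # noiseless measurements, for simplicity
    xhat = abs_recover(y, M, Omega, lbd=0.01)
    print(np.linalg.norm(x0 - xhat) / np.linalg.norm(x0))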