changeset 10:b48f725ceafa

Added new files split from pyCSalgos: ABSexact.py, ABSlambda.py, ABSmixed.py, AnalysisGenerate.py, algos.py
author Nic Cleju <nikcleju@gmail.com>
date Fri, 09 Mar 2012 16:33:35 +0200
parents 04c9264f0e23
children b963105831c1
files ABSapproxMultiproc.py ABSapproxPP.py ABSapproxSingle.py ABSexact.py ABSlambda.py ABSmixed.py AnalysisGenerate.py algos.py
diffstat 7 files changed, 362 insertions(+), 726 deletions(-) [+]
line wrap: on
line diff
--- a/ABSapproxMultiproc.py	Thu Jan 19 19:12:20 2012 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,254 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Sat Nov 05 18:08:40 2011
-
-@author: Nic
-"""
-
-import numpy as np
-import scipy.io
-import math
-from multiprocessing import Pool
-doplot = True
-try: 
-  import matplotlib.pyplot as plt
-except:
-  doplot = False
-if doplot:  
-  import matplotlib.cm as cm
-import pyCSalgos
-import pyCSalgos.GAP.GAP
-import pyCSalgos.SL0.SL0_approx
-
-# Define functions that prepare arguments for each algorithm call
-def run_gap(y,M,Omega,epsilon):
-  gapparams = {"num_iteration" : 1000,\
-                   "greedy_level" : 0.9,\
-                   "stopping_coefficient_size" : 1e-4,\
-                   "l2solver" : 'pseudoinverse',\
-                   "noise_level": epsilon}
-  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
- 
-def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
-  
-  N,n = Omega.shape
-  #D = np.linalg.pinv(Omega)
-  #U,S,Vt = np.linalg.svd(D)
-  aggDupper = np.dot(M,D)
-  aggDlower = Vt[-(N-n):,:]
-  aggD = np.concatenate((aggDupper, lbd * aggDlower))
-  aggy = np.concatenate((y, np.zeros(N-n)))
-  
-  sigmamin = 0.001
-  sigma_decrease_factor = 0.5
-  mu_0 = 2
-  L = 10
-  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
-
-def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
-  
-  N,n = Omega.shape
-  #D = np.linalg.pinv(Omega)
-  #U,S,Vt = np.linalg.svd(D)
-  aggDupper = np.dot(M,D)
-  aggDlower = Vt[-(N-n):,:]
-  aggD = np.concatenate((aggDupper, lbd * aggDlower))
-  aggy = np.concatenate((y, np.zeros(N-n)))
-  
-  sigmamin = 0.001
-  sigma_decrease_factor = 0.5
-  mu_0 = 2
-  L = 10
-  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
-
-
-# Define tuples (algorithm setup function, algorithm function, name)
-gap = (run_gap, 'GAP')
-sl0 = (run_sl0, 'SL0_approx')
-
-# Define which algorithms to run
-#  1. Algorithms not depending on lambda
-algosN = gap,   # tuple
-#  2. Algorithms depending on lambda (our ABS approach)
-algosL = sl0,   # tuple
-
-def mainrun():
-  
-  nalgosN = len(algosN)  
-  nalgosL = len(algosL)
-  
-  #Set up experiment parameters
-  d = 50.0;
-  sigma = 2.0
-  #deltas = np.arange(0.05,1.,0.05)
-  #rhos = np.arange(0.05,1.,0.05)
-  deltas = np.array([0.05, 0.45, 0.95])
-  rhos = np.array([0.05, 0.45, 0.95])
-  #deltas = np.array([0.05])
-  #rhos = np.array([0.05])
-  #delta = 0.8;
-  #rho   = 0.15;
-  numvects = 100; # Number of vectors to generate
-  SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
-  # Values for lambda
-  #lambdas = [0 10.^linspace(-5, 4, 10)];
-  #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
-  lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000])
-
-  meanmatrix = dict()
-  for i,algo in zip(np.arange(nalgosN),algosN):
-    meanmatrix[algo[1]]   = np.zeros((rhos.size, deltas.size))
-  for i,algo in zip(np.arange(nalgosL),algosL):
-    meanmatrix[algo[1]]   = np.zeros((lambdas.size, rhos.size, deltas.size))
-  
-  jobparams = []
-  for idelta,delta in zip(np.arange(deltas.size),deltas):
-    for irho,rho in zip(np.arange(rhos.size),rhos):
-      
-      # Generate data and operator
-      Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
-      
-      # Run algorithms
-      print "***** delta = ",delta," rho = ",rho
-      #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
-      jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))
-
-  pool = Pool(4)
-  jobresults = pool.map(runoncetuple,jobparams)
-
-  idx = 0
-  for idelta,delta in zip(np.arange(deltas.size),deltas):
-    for irho,rho in zip(np.arange(rhos.size),rhos):
-      mrelerrN,mrelerrL = jobresults[idx]
-      idx = idx+1
-      for algotuple in algosN: 
-        meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
-        if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
-          meanmatrix[algotuple[1]][irho,idelta] = 0
-      for algotuple in algosL:
-        for ilbd in np.arange(lambdas.size):
-          meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
-          if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
-            meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
-   
-  #  # Prepare matrices to show
-  #  showmats = dict()
-  #  for i,algo in zip(np.arange(nalgosN),algosN):
-  #    showmats[algo[1]]   = np.zeros(rhos.size, deltas.size)
-  #  for i,algo in zip(np.arange(nalgosL),algosL):
-  #    showmats[algo[1]]   = np.zeros(lambdas.size, rhos.size, deltas.size)
-
-    # Save
-    tosave = dict()
-    tosave['meanmatrix'] = meanmatrix
-    tosave['d'] = d
-    tosave['sigma'] = sigma
-    tosave['deltas'] = deltas
-    tosave['rhos'] = rhos
-    tosave['numvects'] = numvects
-    tosave['SNRdb'] = SNRdb
-    tosave['lambdas'] = lambdas
-    try:
-      scipy.io.savemat('ABSapprox.mat',tosave)
-    except TypeError:
-      print "Oops, Type Error"
-      raise    
-  # Show
-  if doplot:
-    for algotuple in algosN:
-      plt.figure()
-      plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower')
-    for algotuple in algosL:
-      for ilbd in np.arange(lambdas.size):
-        plt.figure()
-        plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower')
-    plt.show()
-  print "Finished."
-  
-def genData(d,sigma,delta,rho,numvects,SNRdb):
-
-  # Process parameters
-  noiselevel = 1.0 / (10.0**(SNRdb/10.0));
-  p = round(sigma*d);
-  m = round(delta*d);
-  l = round(d - rho*m);
-  
-  # Generate Omega and data based on parameters
-  Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
-  # Optionally make Omega more coherent
-  U,S,Vt = np.linalg.svd(Omega);
-  Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega!
-  Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
-  Omega = np.dot(U , np.dot(Snew,Vt))
-
-  # Generate data  
-  x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
-  
-  return Omega,x0,y,M,realnoise
-
-def runoncetuple(t):
-  return runonce(*t)
-
-def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
-  
-  d = Omega.shape[1]  
-  
-  nalgosN = len(algosN)  
-  nalgosL = len(algosL)
-  
-  xrec = dict()
-  err = dict()
-  relerr = dict()
-
-  # Prepare storage variables for algorithms non-Lambda
-  for i,algo in zip(np.arange(nalgosN),algosN):
-    xrec[algo[1]]   = np.zeros((d, y.shape[1]))
-    err[algo[1]]    = np.zeros(y.shape[1])
-    relerr[algo[1]] = np.zeros(y.shape[1])
-  # Prepare storage variables for algorithms with Lambda    
-  for i,algo in zip(np.arange(nalgosL),algosL):
-    xrec[algo[1]]   = np.zeros((lambdas.size, d, y.shape[1]))
-    err[algo[1]]    = np.zeros((lambdas.size, y.shape[1]))
-    relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
-  
-  # Run algorithms non-Lambda
-  for iy in np.arange(y.shape[1]):
-    for algofunc,strname in algosN:
-      epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-      xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
-      err[strname][iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
-      relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
-  for algotuple in algosN:
-    print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])  
-
-  # Run algorithms with Lambda
-  for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
-    for iy in np.arange(y.shape[1]):
-      D = np.linalg.pinv(Omega)
-      U,S,Vt = np.linalg.svd(D)
-      for algofunc,strname in algosL:
-        epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-        gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
-        xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
-        err[strname][ilbd,iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
-        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
-    print 'Lambda = ',lbd,' :'
-    for algotuple in algosL:
-      print '   ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
-
-  # Prepare results
-  mrelerrN = dict()
-  for algotuple in algosN:
-    mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
-  mrelerrL = dict()
-  for algotuple in algosL:
-    mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1)
-  
-  return mrelerrN,mrelerrL
-  
-# Script main
-if __name__ == "__main__":
-  #import cProfile
-  #cProfile.run('mainrun()', 'profile')    
-
-  mainrun()
--- a/ABSapproxPP.py	Thu Jan 19 19:12:20 2012 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,232 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Sat Nov 05 18:08:40 2011
-
-@author: Nic
-"""
-
-import numpy
-import scipy.io
-import math
-#import matplotlib.pyplot as plt
-#import matplotlib.cm as cm
-import pp
-import pyCSalgos
-import pyCSalgos.GAP.GAP
-import pyCSalgos.SL0.SL0_approx
-
-# Define functions that prepare arguments for each algorithm call
-def run_gap(y,M,Omega,epsilon):
-  gapparams = {"num_iteration" : 1000,\
-                   "greedy_level" : 0.9,\
-                   "stopping_coefficient_size" : 1e-4,\
-                   "l2solver" : 'pseudoinverse',\
-                   "noise_level": epsilon}
-  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,numpy.zeros(Omega.shape[1]))[0]
- 
-def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
-  
-  N,n = Omega.shape
-  #D = numpy.linalg.pinv(Omega)
-  #U,S,Vt = numpy.linalg.svd(D)
-  aggDupper = numpy.dot(M,D)
-  aggDlower = Vt[-(N-n):,:]
-  aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
-  aggy = numpy.concatenate((y, numpy.zeros(N-n)))
-  
-  sigmamin = 0.001
-  sigma_decrease_factor = 0.5
-  mu_0 = 2
-  L = 10
-  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
-
-# Define tuples (algorithm setup function, algorithm function, name)
-gap = (run_gap, 'GAP')
-sl0 = (run_sl0, 'SL0_approx')
-
-# Define which algorithms to run
-#  1. Algorithms not depending on lambda
-algosN = gap,   # tuple
-#  2. Algorithms depending on lambda (our ABS approach)
-algosL = sl0,   # tuple
-
-def mainrun():
-  
-  nalgosN = len(algosN)  
-  nalgosL = len(algosL)
-  
-  #Set up experiment parameters
-  d = 50;
-  sigma = 2.0
-  #deltas = numpy.arange(0.05,0.95,0.05)
-  #rhos = numpy.arange(0.05,0.95,0.05)
-  deltas = numpy.array([0.05, 0.45, 0.95])
-  rhos = numpy.array([0.05, 0.45, 0.95])
-  #deltas = numpy.array([0.05])
-  #rhos = numpy.array([0.05])
-  #delta = 0.8;
-  #rho   = 0.15;
-  numvects = 10; # Number of vectors to generate
-  SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
-  # Values for lambda
-  #lambdas = [0 10.^linspace(-5, 4, 10)];
-  lambdas = numpy.concatenate((numpy.array([0]), 10**numpy.linspace(-5, 4, 10)))
-
-  meanmatrix = dict()
-  for i,algo in zip(numpy.arange(nalgosN),algosN):
-    meanmatrix[algo[1]]   = numpy.zeros((rhos.size, deltas.size))
-  for i,algo in zip(numpy.arange(nalgosL),algosL):
-    meanmatrix[algo[1]]   = numpy.zeros((lambdas.size, rhos.size, deltas.size))
-
-  # PP: start job server  
-  job_server = pp.Server(ncpus = 4) 
-  idx = 0
-  jobparams = []
-  for idelta,delta in zip(numpy.arange(deltas.size),deltas):
-    for irho,rho in zip(numpy.arange(rhos.size),rhos):
-      
-      # Generate data and operator
-      Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
-      
-      jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))
-      
-      idx = idx + 1
-      
-  # Run algorithms
-  modules = ('numpy','pyCSalgos','pyCSalgos.GAP.GAP','pyCSalgos.SL0.SL0_approx')
-  depfuncs = ()
-  jobs = [job_server.submit(runonce, jobparam, (run_gap,run_sl0), modules, depfuncs) for jobparam in jobparams]
-  #funcarray[idelta,irho] = job_server.submit(runonce,(algosN,algosL, Omega,y,lambdas,realnoise,M,x0), (run_gap,run_sl0))
-      #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
-
-  # Get data from jobs
-  idx = 0
-  for idelta,delta in zip(numpy.arange(deltas.size),deltas):
-    for irho,rho in zip(numpy.arange(rhos.size),rhos):
-      print "***** delta = ",delta," rho = ",rho
-      mrelerrN,mrelerrL = jobs[idx]()
-      for algotuple in algosN: 
-        meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
-        if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
-          meanmatrix[algotuple[1]][irho,idelta] = 0
-      for algotuple in algosL:
-        for ilbd in numpy.arange(lambdas.size):
-          meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
-          if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
-            meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
-      idx = idx + 1
-   
-  #  # Prepare matrices to show
-  #  showmats = dict()
-  #  for i,algo in zip(numpy.arange(nalgosN),algosN):
-  #    showmats[algo[1]]   = numpy.zeros(rhos.size, deltas.size)
-  #  for i,algo in zip(numpy.arange(nalgosL),algosL):
-  #    showmats[algo[1]]   = numpy.zeros(lambdas.size, rhos.size, deltas.size)
-
-    # Save
-    tosave = dict()
-    tosave['meanmatrix'] = meanmatrix
-    tosave['d'] = d
-    tosave['sigma'] = sigma
-    tosave['deltas'] = deltas
-    tosave['rhos'] = rhos
-    tosave['numvects'] = numvects
-    tosave['SNRdb'] = SNRdb
-    tosave['lambdas'] = lambdas
-    try:
-      scipy.io.savemat('ABSapprox.mat',tosave)
-    except TypeError:
-      print "Oops, Type Error"
-      raise    
-  # Show
-  #  for algotuple in algosN:
-  #    plt.figure()
-  #    plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest')
-  #  for algotuple in algosL:
-  #    for ilbd in numpy.arange(lambdas.size):
-  #      plt.figure()
-  #      plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest')
-  #  plt.show()
-  print "Finished."
-  
-def genData(d,sigma,delta,rho,numvects,SNRdb):
-
-  # Process parameters
-  noiselevel = 1.0 / (10.0**(SNRdb/10.0));
-  p = round(sigma*d);
-  m = round(delta*d);
-  l = round(d - rho*m);
-  
-  # Generate Omega and data based on parameters
-  Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
-  # Optionally make Omega more coherent
-  U,S,Vt = numpy.linalg.svd(Omega);
-  Sdnew = S * (1+numpy.arange(S.size)) # Make D coherent, not Omega!
-  Snew = numpy.vstack((numpy.diag(Sdnew), numpy.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
-  Omega = numpy.dot(U , numpy.dot(Snew,Vt))
-
-  # Generate data  
-  x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
-  
-  return Omega,x0,y,M,realnoise
-
-def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
-  
-  d = Omega.shape[1]  
-  
-  nalgosN = len(algosN)  
-  nalgosL = len(algosL)
-  
-  xrec = dict()
-  err = dict()
-  relerr = dict()
-
-  # Prepare storage variables for algorithms non-Lambda
-  for i,algo in zip(numpy.arange(nalgosN),algosN):
-    xrec[algo[1]]   = numpy.zeros((d, y.shape[1]))
-    err[algo[1]]    = numpy.zeros(y.shape[1])
-    relerr[algo[1]] = numpy.zeros(y.shape[1])
-  # Prepare storage variables for algorithms with Lambda    
-  for i,algo in zip(numpy.arange(nalgosL),algosL):
-    xrec[algo[1]]   = numpy.zeros((lambdas.size, d, y.shape[1]))
-    err[algo[1]]    = numpy.zeros((lambdas.size, y.shape[1]))
-    relerr[algo[1]] = numpy.zeros((lambdas.size, y.shape[1]))
-  
-  # Run algorithms non-Lambda
-  for iy in numpy.arange(y.shape[1]):
-    for algofunc,strname in algosN:
-      epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
-      xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
-      err[strname][iy]    = numpy.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
-      relerr[strname][iy] = err[strname][iy] / numpy.linalg.norm(x0[:,iy])
-  for algotuple in algosN:
-    print algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname])  
-
-  # Run algorithms with Lambda
-  for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas):
-    for iy in numpy.arange(y.shape[1]):
-      D = numpy.linalg.pinv(Omega)
-      U,S,Vt = numpy.linalg.svd(D)
-      for algofunc,strname in algosL:
-        epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
-        gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
-        xrec[strname][ilbd,:,iy] = numpy.dot(D,gamma)
-        err[strname][ilbd,iy]    = numpy.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
-        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / numpy.linalg.norm(x0[:,iy])
-    print 'Lambda = ',lbd,' :'
-    for algotuple in algosL:
-      print '   ',algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname][ilbd,:])
-
-  # Prepare results
-  mrelerrN = dict()
-  for algotuple in algosN:
-    mrelerrN[algotuple[1]] = numpy.mean(relerr[algotuple[1]])
-  mrelerrL = dict()
-  for algotuple in algosL:
-    mrelerrL[algotuple[1]] = numpy.mean(relerr[algotuple[1]],1)
-  
-  return mrelerrN,mrelerrL
-  
-# Script main
-if __name__ == "__main__":
-  mainrun()
\ No newline at end of file
--- a/ABSapproxSingle.py	Thu Jan 19 19:12:20 2012 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,240 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Sat Nov 05 18:08:40 2011
-
-@author: Nic
-"""
-
-import numpy as np
-import scipy.io
-import math
-doplot = True
-try: 
-  import matplotlib.pyplot as plt
-except:
-  doplot = False
-if doplot:  
-  import matplotlib.cm as cm
-import pyCSalgos
-import pyCSalgos.GAP.GAP
-import pyCSalgos.SL0.SL0_approx
-
-# Define functions that prepare arguments for each algorithm call
-def run_gap(y,M,Omega,epsilon):
-  gapparams = {"num_iteration" : 1000,\
-                   "greedy_level" : 0.9,\
-                   "stopping_coefficient_size" : 1e-4,\
-                   "l2solver" : 'pseudoinverse',\
-                   "noise_level": epsilon}
-  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
- 
-def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
-  
-  N,n = Omega.shape
-  #D = np.linalg.pinv(Omega)
-  #U,S,Vt = np.linalg.svd(D)
-  aggDupper = np.dot(M,D)
-  aggDlower = Vt[-(N-n):,:]
-  aggD = np.concatenate((aggDupper, lbd * aggDlower))
-  aggy = np.concatenate((y, np.zeros(N-n)))
-  
-  sigmamin = 0.001
-  sigma_decrease_factor = 0.5
-  mu_0 = 2
-  L = 10
-  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
-
-def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
-  
-  N,n = Omega.shape
-  #D = np.linalg.pinv(Omega)
-  #U,S,Vt = np.linalg.svd(D)
-  aggDupper = np.dot(M,D)
-  aggDlower = Vt[-(N-n):,:]
-  aggD = np.concatenate((aggDupper, lbd * aggDlower))
-  aggy = np.concatenate((y, np.zeros(N-n)))
-  
-  sigmamin = 0.001
-  sigma_decrease_factor = 0.5
-  mu_0 = 2
-  L = 10
-  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
-
-
-# Define tuples (algorithm setup function, algorithm function, name)
-gap = (run_gap, 'GAP')
-sl0 = (run_sl0, 'SL0_approx')
-
-# Define which algorithms to run
-#  1. Algorithms not depending on lambda
-algosN = gap,   # tuple
-#  2. Algorithms depending on lambda (our ABS approach)
-algosL = sl0,   # tuple
-
-def mainrun():
-  
-  nalgosN = len(algosN)  
-  nalgosL = len(algosL)
-  
-  #Set up experiment parameters
-  d = 50.0;
-  sigma = 2.0
-  #deltas = np.arange(0.05,1.,0.05)
-  #rhos = np.arange(0.05,1.,0.05)
-  deltas = np.array([0.05, 0.45, 0.95])
-  rhos = np.array([0.05, 0.45, 0.95])
-  #deltas = np.array([0.05])
-  #rhos = np.array([0.05])
-  #delta = 0.8;
-  #rho   = 0.15;
-  numvects = 100; # Number of vectors to generate
-  SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
-  # Values for lambda
-  #lambdas = [0 10.^linspace(-5, 4, 10)];
-  #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
-  lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000])
-
-  meanmatrix = dict()
-  for i,algo in zip(np.arange(nalgosN),algosN):
-    meanmatrix[algo[1]]   = np.zeros((rhos.size, deltas.size))
-  for i,algo in zip(np.arange(nalgosL),algosL):
-    meanmatrix[algo[1]]   = np.zeros((lambdas.size, rhos.size, deltas.size))
-  
-  for idelta,delta in zip(np.arange(deltas.size),deltas):
-    for irho,rho in zip(np.arange(rhos.size),rhos):
-      
-      # Generate data and operator
-      Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
-      
-      # Run algorithms
-      print "***** delta = ",delta," rho = ",rho
-      mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
-      
-      for algotuple in algosN: 
-        meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
-        if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
-          meanmatrix[algotuple[1]][irho,idelta] = 0
-      for algotuple in algosL:
-        for ilbd in np.arange(lambdas.size):
-          meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
-          if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
-            meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
-   
-  #  # Prepare matrices to show
-  #  showmats = dict()
-  #  for i,algo in zip(np.arange(nalgosN),algosN):
-  #    showmats[algo[1]]   = np.zeros(rhos.size, deltas.size)
-  #  for i,algo in zip(np.arange(nalgosL),algosL):
-  #    showmats[algo[1]]   = np.zeros(lambdas.size, rhos.size, deltas.size)
-
-    # Save
-    tosave = dict()
-    tosave['meanmatrix'] = meanmatrix
-    tosave['d'] = d
-    tosave['sigma'] = sigma
-    tosave['deltas'] = deltas
-    tosave['rhos'] = rhos
-    tosave['numvects'] = numvects
-    tosave['SNRdb'] = SNRdb
-    tosave['lambdas'] = lambdas
-    try:
-      scipy.io.savemat('ABSapprox.mat',tosave)
-    except TypeError:
-      print "Oops, Type Error"
-      raise    
-  # Show
-  if doplot:
-    for algotuple in algosN:
-      plt.figure()
-      plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower')
-    for algotuple in algosL:
-      for ilbd in np.arange(lambdas.size):
-        plt.figure()
-        plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower')
-    plt.show()
-  print "Finished."
-  
-def genData(d,sigma,delta,rho,numvects,SNRdb):
-
-  # Process parameters
-  noiselevel = 1.0 / (10.0**(SNRdb/10.0));
-  p = round(sigma*d);
-  m = round(delta*d);
-  l = round(d - rho*m);
-  
-  # Generate Omega and data based on parameters
-  Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
-  # Optionally make Omega more coherent
-  U,S,Vt = np.linalg.svd(Omega);
-  Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega!
-  Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
-  Omega = np.dot(U , np.dot(Snew,Vt))
-
-  # Generate data  
-  x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
-  
-  return Omega,x0,y,M,realnoise
-
-def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
-  
-  d = Omega.shape[1]  
-  
-  nalgosN = len(algosN)  
-  nalgosL = len(algosL)
-  
-  xrec = dict()
-  err = dict()
-  relerr = dict()
-
-  # Prepare storage variables for algorithms non-Lambda
-  for i,algo in zip(np.arange(nalgosN),algosN):
-    xrec[algo[1]]   = np.zeros((d, y.shape[1]))
-    err[algo[1]]    = np.zeros(y.shape[1])
-    relerr[algo[1]] = np.zeros(y.shape[1])
-  # Prepare storage variables for algorithms with Lambda    
-  for i,algo in zip(np.arange(nalgosL),algosL):
-    xrec[algo[1]]   = np.zeros((lambdas.size, d, y.shape[1]))
-    err[algo[1]]    = np.zeros((lambdas.size, y.shape[1]))
-    relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
-  
-  # Run algorithms non-Lambda
-  for iy in np.arange(y.shape[1]):
-    for algofunc,strname in algosN:
-      epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-      xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
-      err[strname][iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
-      relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
-  for algotuple in algosN:
-    print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])  
-
-  # Run algorithms with Lambda
-  for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
-    for iy in np.arange(y.shape[1]):
-      D = np.linalg.pinv(Omega)
-      U,S,Vt = np.linalg.svd(D)
-      for algofunc,strname in algosL:
-        epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-        gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
-        xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
-        err[strname][ilbd,iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
-        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
-    print 'Lambda = ',lbd,' :'
-    for algotuple in algosL:
-      print '   ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
-
-  # Prepare results
-  mrelerrN = dict()
-  for algotuple in algosN:
-    mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
-  mrelerrL = dict()
-  for algotuple in algosL:
-    mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1)
-  
-  return mrelerrN,mrelerrL
-  
-# Script main
-if __name__ == "__main__":
-
-  #import cProfile
-  #cProfile.run('mainrun()', 'profile')    
-  mainrun()
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ABSlambda.py	Fri Mar 09 16:33:35 2012 +0200
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 09 14:06:13 2012
+
+@author: ncleju
+"""
+
+import numpy
+import pyCSalgos.BP.l1qc
+import pyCSalgos.SL0.SL0_qc
+import pyCSalgos.OMP.omp_QR
+import pyCSalgos.TST.RecommendedTST
+
+def sl0(y,M,Omega,epsilon,lbd,sigma_min, sigma_decrease_factor=0.5, mu_0=2, L=3, A_pinv=None, true_s=None):
+  
+  N,n = Omega.shape
+  D = numpy.linalg.pinv(Omega)
+  U,S,Vt = numpy.linalg.svd(D)
+  aggDupper = numpy.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+  aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+  
+  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L,A_pinv,true_s)
+  
+def l1qc(y,M,Omega,epsilon,lbd, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200, verbose=False):
+  
+  N,n = Omega.shape
+  D = numpy.linalg.pinv(Omega)
+  U,S,Vt = numpy.linalg.svd(D)
+  aggDupper = numpy.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+  aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+
+  return pyCSalgos.BP.l1qc.l1qc_logbarrier(x0,aggD,aggD.T,aggy,epsilon, lbtol, mu, cgtol, cgmaxiter, verbose)
+
+def ompeps(y,M,Omega,epsilon,lbd):
+  
+  N,n = Omega.shape
+  D = numpy.linalg.pinv(Omega)
+  U,S,Vt = numpy.linalg.svd(D)
+  aggDupper = numpy.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+  aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+  
+  opts = dict()
+  opts['stopCrit'] = 'mse'
+  opts['stopTol'] = epsilon**2 / aggy.size
+  return pyCSalgos.OMP.omp_QR.greed_omp_qr(aggy,aggD,aggD.shape[1],opts)[0]
+  
+def tst_recom(y,M,Omega,epsilon,lbd, nsweep=300, xinitial=None, ro=None):
+  
+  N,n = Omega.shape
+  D = numpy.linalg.pinv(Omega)
+  U,S,Vt = numpy.linalg.svd(D)
+  aggDupper = numpy.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+  aggy = numpy.concatenate((y, numpy.zeros(N-n)))
+  
+  nsweep = 300
+  tol = epsilon / numpy.linalg.norm(aggy)
+  return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy, nsweep, tol, xinitial, ro)
+  
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ABSmixed.py	Fri Mar 09 16:33:35 2012 +0200
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 09 14:06:13 2012
+
+@author: ncleju
+"""
+
+import numpy
+import pyCSalgos.BP.l1qec
+import pyCSalgos.SL0.SL0_approx
+
+def bp(y,M,Omega,epsilon, x0, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200, verbose=False):
+  
+  N,n = Omega.shape
+  D = numpy.linalg.pinv(Omega)
+  U,S,Vt = numpy.linalg.svd(D)
+  Aeps = numpy.dot(M,D)
+  Aexact = Vt[-(N-n):,:]
+  
+  return numpy.dot(D , pyCSalgos.BP.l1qec.l1qec_logbarrier(x0,Aeps,Aeps.T,y,epsilon,Aexact,Aexact.T,numpy.zeros(N-n), lbtol, mu, cgtol, cgmaxiter, verbose))
+
+def sl0(y,M,Omega,epsilon, sigma_min, sigma_decrease_factor=0.5, mu_0=2, L=3, Aeps_pinv=None, Aexact_pinv=None, true_s=None):
+  
+  N,n = Omega.shape
+  D = numpy.linalg.pinv(Omega)
+  U,S,Vt = numpy.linalg.svd(D)
+  Aeps = numpy.dot(M,D)
+  Aexact = Vt[-(N-n):,:]
+  
+  return numpy.dot(D, pyCSalgos.SL0.SL0_approx.SL0_approx_analysis(Aeps,Aexact,y,epsilon,sigmamin,sigma_decrease_factor,mu_0,L,Aeps_pinv,Aexact_pinv,true_s))
+  
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/AnalysisGenerate.py	Fri Mar 09 16:33:35 2012 +0200
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 08 10:56:56 2011
+
+@author: ncleju
+"""
+
+import numpy
+import numpy.linalg
+
+from numpy.random import RandomState
+rng = RandomState()
+
def Generate_Analysis_Operator(d, p, rng=None):
  """Generate a random p x d analysis operator.

  For p == d the operator is a random orthonormal basis (QR of a Gaussian
  matrix).  For p > d it is a random tight frame with equal row norms,
  obtained by alternating projections: project onto the set of tight frames
  via the SVD, then renormalize the rows, until the iterates stop changing.

  Parameters:
    d, p : int -- signal dimension and number of analysis rows (p >= d)
    rng  : numpy.random.RandomState or None -- random source; when None a
           fresh unseeded RandomState is used (backward compatible with the
           original module-level generator).

  Returns:
    Omega : (p, d) ndarray
  """
  if rng is None:
    rng = numpy.random.RandomState()
  if p == d:
    # Square case: a random orthogonal matrix.
    # Bug fix: numpy.qr does not exist -- the QR factorization lives in
    # numpy.linalg.qr, so the original raised AttributeError on this branch.
    Omega, _ = numpy.linalg.qr(rng.randn(d, d))
  else:
    Omega = rng.randn(p, d)
    T = numpy.zeros((p, d))
    tol = 1e-8
    max_j = 200
    j = 1
    # Alternate projections until the change is below tol*p*d or max_j sweeps.
    while numpy.abs(T - Omega).sum() > tol * p * d and j < max_j:
      j = j + 1
      T = Omega
      U, S, Vh = numpy.linalg.svd(Omega)
      V = Vh.T
      # Matlab: Omega = U * [eye(d); zeros(p-d,d)] * V'   (tight-frame projection)
      Omega2 = numpy.dot(numpy.dot(U, numpy.concatenate((numpy.eye(d), numpy.zeros((p - d, d))))), V.transpose())
      # Matlab: Omega = diag(1./sqrt(diag(Omega*Omega'))) * Omega   (row renorm)
      Omega = numpy.dot(numpy.diag(1.0 / numpy.sqrt(numpy.diag(numpy.dot(Omega2, Omega2.transpose())))), Omega2)
  return Omega
+
+
def Generate_Data_Known_Omega(Omega, d,p,m,k,noiselevel, numvectors, normstr):
  """Build analysis-sparse test problems for a KNOWN analysis operator Omega.

  Parameters:
    Omega      : p x d analysis operator
    d, p, m, k : signal size, operator rows, number of measurements, cosparsity
    noiselevel : relative noise, norm(n,2)/norm(y0,2)
    numvectors : number of test vectors to generate
    normstr    : 'l0' -> Omega*x0 has exactly k zeros (only option implemented);
                 'l1' is declared but not ported from Matlab and raises.

  Returns:
    x0, y, M, LambdaMat, realnoise -- true signals (d x numvectors), noisy
    measurements (m x numvectors), the shared m x d Gaussian measurement
    matrix, the k x numvectors zero-location indices, and the actual noise
    added to each column of y.
  """
  # Matlab original signature, kept for reference:
  #function [x0,y,M,LambdaMat] = Generate_Data_Known_Omega(Omega, d,p,m,k,noiselevel, numvectors, normstr)

  # Building an analysis problem, which includes the ingredients:
  #   - Omega - the analysis operator of size p*d
  #   - M is an underdetermined measurement matrix of size m*d (m<d)
  #   - x0 is a vector of length d that satisfies ||Omega*x0||_0 = p-k
  #   - Lambda is the true location of these k zeros in Omega*x0
  #   - a measurement vector y0=Mx0 is computed
  #   - noise contaminated measurement vector y is obtained by
  #     y = y0 + n where n is an additive gaussian noise with norm(n,2)/norm(y0,2) = noiselevel
  # Added by Nic:
  #   - Omega = analysis operator (now an input parameter)
  #   - normstr: if 'l0', generate l0 sparse vector (unchanged). If 'l1',
  #   generate a vector of Laplacian random variables (gamma) and
  #   pseudoinvert to find x

  # Omega is known as input parameter; Matlab code that generated it, kept for reference:
  #Omega=Generate_Analysis_Operator(d, p);
  # Omega = randn(p,d);
  # for i = 1:size(Omega,1)
  #     Omega(i,:) = Omega(i,:) / norm(Omega(i,:));
  # end

  # Preallocate outputs
  LambdaMat = numpy.zeros((k,numvectors))
  x0 = numpy.zeros((d,numvectors))
  y = numpy.zeros((m,numvectors))
  realnoise = numpy.zeros((m,numvectors))

  # One random Gaussian measurement matrix, shared by all generated vectors
  M = rng.randn(m,d);

  #for i=1:numvectors
  for i in range(0,numvectors):

    # Generate signals
    #if strcmp(normstr,'l0')
    if normstr == 'l0':
        # Unchanged from the Matlab code

        # Pick k rows of Omega at random; Omega*x0 will vanish on these rows
        #Lambda=randperm(p);
        Lambda = rng.permutation(int(p));
        Lambda = numpy.sort(Lambda[0:k]);
        LambdaMat[:,i] = Lambda; # store for output

        # The signal is drawn at random from the null-space defined by the rows
        # of the matrix Omega(Lambda,:)
        [U,D,Vh] = numpy.linalg.svd(Omega[Lambda,:]);
        V = Vh.T
        # Columns k..d-1 of V span the null space of Omega[Lambda,:]
        NullSpace = V[:,k:];
        #print numpy.dot(NullSpace, rng.randn(d-k,1)).shape
        #print x0[:,i].shape
        x0[:,i] = numpy.squeeze(numpy.dot(NullSpace, rng.randn(d-k,1)));
        # Nic: add orthogonality noise (disabled experiment, kept for reference)
        #     orthonoiseSNRdb = 6;
        #     n = randn(p,1);
        #     #x0(:,i) = x0(:,i) + n / norm(n)^2 * norm(x0(:,i))^2 / 10^(orthonoiseSNRdb/10);
        #     n = n / norm(n)^2 * norm(Omega * x0(:,i))^2 / 10^(orthonoiseSNRdb/10);
        #     x0(:,i) = pinv(Omega) * (Omega * x0(:,i) + n);

    #elseif strcmp(normstr, 'l1')
    elif normstr == 'l1':
        # 'l1' generation was never ported from Matlab
        print('Nic says: not implemented yet')
        raise Exception('Nic says: not implemented yet')
        #gamma = laprnd(p,1,0,1);
        #x0(:,i) = Omega \ gamma;
    else:
        # Matlab: error('normstr must be l0 or l1!');
        # NOTE(review): this branch means an invalid normstr value, but the
        # message says "not implemented" -- consider clarifying.
        print('Nic says: not implemented yet')
        raise Exception('Nic says: not implemented yet')
    #end

    # Acquire measurements
    y[:,i]  = numpy.dot(M, x0[:,i])

    # Add noise scaled so that norm(noise)/norm(y0) == noiselevel
    t_norm = numpy.linalg.norm(y[:,i],2)
    n = numpy.squeeze(rng.randn(m, 1))
    # In case n is just a scalar (m == 1), not an array, norm() fails
    if n.ndim == 0:
      nnorm = abs(n)
    else:
      nnorm = numpy.linalg.norm(n, 2);
    y[:,i] = y[:,i] + noiselevel * t_norm * n / nnorm
    realnoise[:,i] = noiselevel * t_norm * n / nnorm
  #end

  return x0,y,M,LambdaMat,realnoise
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/algos.py	Fri Mar 09 16:33:35 2012 +0200
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Dec 07 14:06:13 2011
+
+@author: ncleju
+"""
+
+import numpy
+import pyCSalgos
+import pyCSalgos.GAP.GAP
+import pyCSalgos.BP.l1qc
+import pyCSalgos.BP.l1qec
+import pyCSalgos.SL0.SL0_approx
+import pyCSalgos.OMP.omp_QR
+import pyCSalgos.RecomTST.RecommendedTST
+import pyCSalgos.NESTA.NESTA
+
+#==========================
+# Algorithm functions
+#==========================
def run_gap(y,M,Omega,epsilon):
  """Run GAP (Greedy Analysis Pursuit) and return the recovered signal."""
  params = dict(num_iteration=1000,
                greedy_level=0.9,
                stopping_coefficient_size=1e-4,
                l2solver='pseudoinverse',
                noise_level=epsilon)
  xinit = numpy.zeros(Omega.shape[1])
  result = pyCSalgos.GAP.GAP.GAP(y, M, M.T, Omega, Omega.T, params, xinit)
  return result[0]
+
def run_bp_analysis(y,M,Omega,epsilon):
  """Analysis-form Basis Pursuit: solve in the coefficient domain with exact
  null-space constraints, then map back through the pseudoinverse dictionary.
  No aggregate (lambda-weighted) matrices are needed in this formulation.
  """
  p, d = Omega.shape
  D = numpy.linalg.pinv(Omega)
  Vt = numpy.linalg.svd(D)[2]
  Aeps = numpy.dot(M, D)            # epsilon-accurate measurement constraint
  Aexact = Vt[-(p - d):, :]         # rows tied to the zero singular values of D
  start = numpy.zeros(p)
  gamma = pyCSalgos.BP.l1qec.l1qec_logbarrier(start, Aeps, Aeps.T, y, epsilon,
                                              Aexact, Aexact.T, numpy.zeros(p - d))
  return numpy.dot(D, gamma)
+
def run_sl0_analysis(y,M,Omega,epsilon):
  """Analysis-form SL0: smoothed-l0 recovery with exact null-space constraints,
  mapped back to the signal domain through the pseudoinverse dictionary.
  No aggregate (lambda-weighted) matrices are needed in this formulation.
  """
  p, d = Omega.shape
  D = numpy.linalg.pinv(Omega)
  Vt = numpy.linalg.svd(D)[2]
  Aeps = numpy.dot(M, D)            # epsilon-accurate measurement constraint
  Aexact = Vt[-(p - d):, :]         # rows tied to the zero singular values of D
  # Solver hyper-parameters (same values as the original).
  sigma_min, sigma_step, mu0, inner_iters = 0.001, 0.5, 2, 10
  gamma = pyCSalgos.SL0.SL0_approx.SL0_approx_analysis(Aeps, Aexact, y, epsilon,
                                                       sigma_min, sigma_step,
                                                       mu0, inner_iters)
  return numpy.dot(D, gamma)
+
def run_nesta(y,M,Omega,epsilon):
  """Run NESTA with an explicit SVD of the measurement matrix M."""
  m, n = M.shape                    # match Matlab's size() convention
  U, s, V = numpy.linalg.svd(M, full_matrices=True)
  V = V.T                           # numpy returns V'; transpose to match Matlab
  # Rebuild the full m x n singular-value matrix expected by NESTA.
  S = numpy.hstack((numpy.diag(s), numpy.zeros((m, n - m))))

  opts = {'U': Omega,
          'Ut': Omega.T.copy(),
          'USV': {'U': U, 'S': S, 'V': V},
          'TolVar': 1e-5,
          'Verbose': 0}
  muf = 1e-3
  return pyCSalgos.NESTA.NESTA.NESTA(M, None, y, muf, epsilon, opts)[0]
+
+
def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
  """ABS-lambda SL0: run smoothed-l0 on the aggregate lambda-weighted problem.
  D, U, S, Vt are the precomputed pinv(Omega) and its SVD factors, passed in
  by the caller so they are not recomputed per run.
  """
  p, d = Omega.shape
  # Aggregate system: [M*D; lbd * null-space rows] * gamma ~= [y; 0]
  aggD = numpy.vstack((numpy.dot(M, D), lbd * Vt[-(p - d):, :]))
  aggy = numpy.hstack((y, numpy.zeros(p - d)))

  # SL0 hyper-parameters (same values as the original).
  sigma_min, sigma_step, mu0, inner_iters = 0.001, 0.5, 2, 10
  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD, aggy, epsilon,
                                             sigma_min, sigma_step, mu0, inner_iters)
+  
def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
  """ABS-lambda Basis Pursuit: l1 minimization on the aggregate problem.
  D, U, S, Vt are the precomputed pinv(Omega) and its SVD factors.
  """
  p, d = Omega.shape
  # Aggregate system: [M*D; lbd * null-space rows] * gamma ~= [y; 0]
  aggD = numpy.vstack((numpy.dot(M, D), lbd * Vt[-(p - d):, :]))
  aggy = numpy.hstack((y, numpy.zeros(p - d)))

  start = numpy.zeros(p)
  return pyCSalgos.BP.l1qc.l1qc_logbarrier(start, aggD, aggD.T, aggy, epsilon)
+
def run_ompeps(y,M,Omega,D,U,S,Vt,epsilon,lbd):
  """ABS-lambda OMP with epsilon stopping: greedy OMP on the aggregate problem,
  stopping when the mean squared residual reaches epsilon**2 / len(aggy).
  D, U, S, Vt are the precomputed pinv(Omega) and its SVD factors.
  """
  p, d = Omega.shape
  # Aggregate system: [M*D; lbd * null-space rows] * gamma ~= [y; 0]
  aggD = numpy.vstack((numpy.dot(M, D), lbd * Vt[-(p - d):, :]))
  aggy = numpy.hstack((y, numpy.zeros(p - d)))

  opts = {'stopCrit': 'mse', 'stopTol': epsilon ** 2 / aggy.size}
  return pyCSalgos.OMP.omp_QR.greed_omp_qr(aggy, aggD, aggD.shape[1], opts)[0]
+  
def run_tst(y,M,Omega,D,U,S,Vt,epsilon,lbd):
  """ABS-lambda TST: Recommended Two-Stage Thresholding on the aggregate
  problem, with the tolerance made relative to the measurement norm.
  D, U, S, Vt are the precomputed pinv(Omega) and its SVD factors.
  """
  p, d = Omega.shape
  # Aggregate system: [M*D; lbd * null-space rows] * gamma ~= [y; 0]
  aggD = numpy.vstack((numpy.dot(M, D), lbd * Vt[-(p - d):, :]))
  aggy = numpy.hstack((y, numpy.zeros(p - d)))

  sweeps = 300
  rel_tol = epsilon / numpy.linalg.norm(aggy)
  return pyCSalgos.RecomTST.RecommendedTST.RecommendedTST(aggD, aggy,
                                                          nsweep=sweeps,
                                                          tol=rel_tol)
+
+
#==========================
# Define tuples (algorithm function, name)
#==========================
# Each tuple pairs a solver wrapper defined above with the short label used
# when reporting results.
gap = (run_gap, 'GAP')                     # Greedy Analysis Pursuit
sl0 = (run_sl0, 'SL0a')                    # SL0 on the aggregate (lambda) problem
sl0analysis = (run_sl0_analysis, 'SL0a2')  # SL0, analysis formulation with exact constraints
bpanalysis = (run_bp_analysis, 'BPa2')     # Basis Pursuit, analysis formulation
nesta = (run_nesta, 'NESTA')               # NESTA solver
bp  = (run_bp, 'BP')                       # Basis Pursuit on the aggregate problem
ompeps = (run_ompeps, 'OMPeps')            # OMP with mse (epsilon) stopping criterion
tst = (run_tst, 'TST')                     # Recommended Two-Stage Thresholding
\ No newline at end of file