changeset 27:1a88766113a9

A lot of things. Fixed problem in GAP. Fixed multiprocessor versions of script (both PP and multiproc).
author nikcleju
date Wed, 09 Nov 2011 18:18:42 +0000
parents f0f77d92e0c1
children efe3f43a2b59
files pyCSalgos/GAP/GAP.py scripts/ABSapprox.py scripts/ABSapproxMultiproc.py scripts/ABSapproxPP.py scripts/utils.py
diffstat 5 files changed, 439 insertions(+), 131 deletions(-) [+]
line wrap: on
line diff
--- a/pyCSalgos/GAP/GAP.py	Wed Nov 09 11:11:39 2011 +0000
+++ b/pyCSalgos/GAP/GAP.py	Wed Nov 09 18:18:42 2011 +0000
@@ -7,10 +7,11 @@
 
 #from numpy import *
 #from scipy import *
-import numpy as np
+import numpy
+import numpy.linalg
 import scipy as sp
-import scipy.stats #from scipy import stats
-import scipy.linalg #from scipy import lnalg
+#import scipy.stats #from scipy import stats
+#import scipy.linalg #from scipy import linalg
 import math
 
 from numpy.random import RandomState
@@ -20,22 +21,22 @@
   # generate random tight frame with equal column norms
   if p == d:
     T = rng.randn(d,d);
-    [Omega, discard] = np.qr(T);
+    [Omega, discard] = numpy.qr(T);
   else:
     Omega = rng.randn(p, d);
-    T = np.zeros((p, d));
+    T = numpy.zeros((p, d));
     tol = 1e-8;
     max_j = 200;
     j = 1;
-    while (sum(sum(abs(T-Omega))) > np.dot(tol,np.dot(p,d)) and j < max_j):
+    while (sum(sum(abs(T-Omega))) > numpy.dot(tol,numpy.dot(p,d)) and j < max_j):
         j = j + 1;
         T = Omega;
-        [U, S, Vh] = sp.linalg.svd(Omega);
+        [U, S, Vh] = numpy.linalg.svd(Omega);
         V = Vh.T
         #Omega = U * [eye(d); zeros(p-d,d)] * V';
-        Omega2 = np.dot(np.dot(U, np.concatenate((np.eye(d), np.zeros((p-d,d))))), V.transpose())
+        Omega2 = numpy.dot(numpy.dot(U, numpy.concatenate((numpy.eye(d), numpy.zeros((p-d,d))))), V.transpose())
         #Omega = diag(1./sqrt(diag(Omega*Omega')))*Omega;
-        Omega = np.dot(np.diag(1.0 / np.sqrt(np.diag(np.dot(Omega2,Omega2.transpose())))), Omega2)
+        Omega = numpy.dot(numpy.diag(1.0 / numpy.sqrt(numpy.diag(numpy.dot(Omega2,Omega2.transpose())))), Omega2)
     #end
     ##disp(j);
     #end
@@ -67,10 +68,10 @@
   # end
   
   #Init
-  LambdaMat = np.zeros((k,numvectors))
-  x0 = np.zeros((d,numvectors))
-  y = np.zeros((m,numvectors))
-  realnoise = np.zeros((m,numvectors))
+  LambdaMat = numpy.zeros((k,numvectors))
+  x0 = numpy.zeros((d,numvectors))
+  y = numpy.zeros((m,numvectors))
+  realnoise = numpy.zeros((m,numvectors))
   
   M = rng.randn(m,d);
   
@@ -84,17 +85,17 @@
         
         #Lambda=randperm(p); 
         Lambda = rng.permutation(int(p));
-        Lambda = np.sort(Lambda[0:k]); 
+        Lambda = numpy.sort(Lambda[0:k]); 
         LambdaMat[:,i] = Lambda; # store for output
         
         # The signal is drawn at random from the null-space defined by the rows 
         # of the matreix Omega(Lambda,:)
-        [U,D,Vh] = sp.linalg.svd(Omega[Lambda,:]);
+        [U,D,Vh] = numpy.linalg.svd(Omega[Lambda,:]);
         V = Vh.T
         NullSpace = V[:,k:];
-        #print np.dot(NullSpace, rng.randn(d-k,1)).shape
+        #print numpy.dot(NullSpace, rng.randn(d-k,1)).shape
         #print x0[:,i].shape
-        x0[:,i] = np.squeeze(np.dot(NullSpace, rng.randn(d-k,1)));
+        x0[:,i] = numpy.squeeze(numpy.dot(NullSpace, rng.randn(d-k,1)));
         # Nic: add orthogonality noise
         #     orthonoiseSNRdb = 6;
         #     n = randn(p,1);
@@ -115,13 +116,13 @@
     #end
     
     # Acquire measurements
-    y[:,i]  = np.dot(M, x0[:,i])
+    y[:,i]  = numpy.dot(M, x0[:,i])
 
     # Add noise
-    t_norm = np.linalg.norm(y[:,i],2);
-    n = np.squeeze(rng.randn(m, 1));
-    y[:,i] = y[:,i] + noiselevel * t_norm * n / np.linalg.norm(n, 2);
-    realnoise[:,i] = noiselevel * t_norm * n / np.linalg.norm(n, 2)
+    t_norm = numpy.linalg.norm(y[:,i],2);
+    n = numpy.squeeze(rng.randn(m, 1));
+    y[:,i] = y[:,i] + noiselevel * t_norm * n / numpy.linalg.norm(n, 2);
+    realnoise[:,i] = noiselevel * t_norm * n / numpy.linalg.norm(n, 2)
   #end
 
   return x0,y,M,LambdaMat,realnoise
@@ -173,8 +174,8 @@
       if M.dtype == 'float64' and Omega.dtype == 'double':
         while True:
             alpha = math.sqrt(lagmult);
-            xhat = np.linalg.lstsq(np.concatenate((M, alpha*Omega[Lambdahat,:])), np.concatenate((y, np.zeros(Lambdahat.size))))[0]
-            temp = np.linalg.norm(y - np.dot(M,xhat), 2);
+            xhat = numpy.linalg.lstsq(numpy.concatenate((M, alpha*Omega[Lambdahat,:])), numpy.concatenate((y, numpy.zeros(Lambdahat.size))))[0]
+            temp = numpy.linalg.norm(y - numpy.dot(M,xhat), 2);
             #disp(['fidelity error=', num2str(temp), ' lagmult=', num2str(lagmult)]);
             if temp <= params['noise_level']:
                 was_feasible = True;
@@ -191,7 +192,7 @@
             if lagmult < lagmultmin or lagmult > lagmultmax:
                 break;
             xprev = xhat.copy();
-        arepr = np.dot(Omega[Lambdahat, :], xhat);
+        arepr = numpy.dot(Omega[Lambdahat, :], xhat);
         return xhat,arepr,lagmult;
 
 
@@ -202,9 +203,9 @@
     if hasattr(MH, '__call__'):
         b = MH(y);
     else:
-        b = np.dot(MH, y);
+        b = numpy.dot(MH, y);
     
-    norm_b = np.linalg.norm(b, 2);
+    norm_b = numpy.linalg.norm(b, 2);
     xhat = xinit.copy();
     xprev = xinit.copy();
     residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat, lagmult) - b;
@@ -213,26 +214,26 @@
     
     while iter < params.max_inner_iteration:
         iter = iter + 1;
-        alpha = np.linalg.norm(residual,2)**2 / np.dot(direction.T, TheHermitianMatrix(direction, M, MH, Omega, OmegaH, Lambdahat, lagmult));
+        alpha = numpy.linalg.norm(residual,2)**2 / numpy.dot(direction.T, TheHermitianMatrix(direction, M, MH, Omega, OmegaH, Lambdahat, lagmult));
         xhat = xhat + alpha*direction;
         prev_residual = residual.copy();
         residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat, lagmult) - b;
-        beta = np.linalg.norm(residual,2)**2 / np.linalg.norm(prev_residual,2)**2;
+        beta = numpy.linalg.norm(residual,2)**2 / numpy.linalg.norm(prev_residual,2)**2;
         direction = -residual + beta*direction;
         
-        if np.linalg.norm(residual,2)/norm_b < params['l2_accuracy']*(lagmult**(accuracy_adjustment_exponent)) or iter == params['max_inner_iteration']:
+        if numpy.linalg.norm(residual,2)/norm_b < params['l2_accuracy']*(lagmult**(accuracy_adjustment_exponent)) or iter == params['max_inner_iteration']:
             #if strcmp(class(M), 'function_handle')
             if hasattr(M, '__call__'):
-                temp = np.linalg.norm(y-M(xhat), 2);
+                temp = numpy.linalg.norm(y-M(xhat), 2);
             else:
-                temp = np.linalg.norm(y-np.dot(M,xhat), 2);
+                temp = numpy.linalg.norm(y-numpy.dot(M,xhat), 2);
             
             #if strcmp(class(Omega), 'function_handle')
             if hasattr(Omega, '__call__'):
                 u = Omega(xhat);
-                u = math.sqrt(lagmult)*np.linalg.norm(u(Lambdahat), 2);
+                u = math.sqrt(lagmult)*numpy.linalg.norm(u(Lambdahat), 2);
             else:
-                u = math.sqrt(lagmult)*np.linalg.norm(Omega[Lambdahat,:]*xhat, 2);
+                u = math.sqrt(lagmult)*numpy.linalg.norm(Omega[Lambdahat,:]*xhat, 2);
             
             
             #disp(['residual=', num2str(norm(residual,2)), ' norm_b=', num2str(norm_b), ' omegapart=', num2str(u), ' fidelity error=', num2str(temp), ' lagmult=', num2str(lagmult), ' iter=', num2str(iter)]);
@@ -285,7 +286,7 @@
         temp = Omega(xhat);
         arepr = temp(Lambdahat);
     else:    ## here Omega is assumed to be a matrix
-        arepr = np.dot(Omega[Lambdahat, :], xhat);
+        arepr = numpy.dot(Omega[Lambdahat, :], xhat);
     
     return xhat,arepr,lagmult
 
@@ -299,15 +300,15 @@
     if hasattr(M, '__call__'):
         w = MH(M(x));
     else:    ## M and MH are matrices
-        w = np.dot(np.dot(MH, M), x);
+        w = numpy.dot(numpy.dot(MH, M), x);
     
     if hasattr(Omega, '__call__'):
         v = Omega(x);
-        vt = np.zeros(v.size);
+        vt = numpy.zeros(v.size);
         vt[L] = v[L].copy();
         w = w + lm*OmegaH(vt);
     else:    ## Omega is assumed to be a matrix and OmegaH is its conjugate transpose
-        w = w + lm*np.dot(np.dot(OmegaH[:, L],Omega[L, :]),x);
+        w = w + lm*numpy.dot(numpy.dot(OmegaH[:, L],Omega[L, :]),x);
     
     return w
 
@@ -389,7 +390,7 @@
   #    p = size(Omega, 1);
   #end
   if hasattr(Omega, '__call__'):
-      p = Omega(np.zeros((d,1)))
+      p = Omega(numpy.zeros((d,1)))
   else:
       p = Omega.shape[0]
   
@@ -397,14 +398,14 @@
   iter = 0
   lagmult = 1e-4
   #Lambdahat = 1:p;
-  Lambdahat = np.arange(p)
+  Lambdahat = numpy.arange(p)
   #while iter < params.num_iteration
   while iter < params["num_iteration"]:
       iter = iter + 1
       #[xhat, analysis_repr, lagmult] = ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit, lagmult, params);
       xhat,analysis_repr,lagmult = ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit, lagmult, params)
       #[to_be_removed, maxcoef] = FindRowsToRemove(analysis_repr, params.greedy_level);
-      to_be_removed, maxcoef = FindRowsToRemove(analysis_repr, params["greedy_level"])
+      to_be_removed,maxcoef = FindRowsToRemove(analysis_repr, params["greedy_level"])
       #disp(['** maxcoef=', num2str(maxcoef), ' target=', num2str(params.stopping_coefficient_size), ' rows_remaining=', num2str(length(Lambdahat)), ' lagmult=', num2str(lagmult)]);
       #print '** maxcoef=',maxcoef,' target=',params['stopping_coefficient_size'],' rows_remaining=',Lambdahat.size,' lagmult=',lagmult
       if check_stopping_criteria(xhat, xinit, maxcoef, lagmult, Lambdahat, params):
@@ -412,9 +413,7 @@
 
       xinit = xhat.copy()
       #Lambdahat[to_be_removed] = []
-      # TODO: find what why squeeze() is needed here!!
-      if len(to_be_removed) != 0:
-        Lambdahat = np.delete(Lambdahat.squeeze(),to_be_removed)
+      Lambdahat = numpy.delete(Lambdahat.squeeze(),to_be_removed)
   
       #n = sqrt(d);
       #figure(9);
@@ -431,13 +430,13 @@
       #imshow(reshape(real(xhat), n, n));
   
   #return;
-  return xhat, Lambdahat
+  return xhat,Lambdahat
  
 def FindRowsToRemove(analysis_repr, greedy_level):
 #function [to_be_removed, maxcoef] = FindRowsToRemove(analysis_repr, greedy_level)
 
     #abscoef = abs(analysis_repr(:));
-    abscoef = np.abs(analysis_repr)
+    abscoef = numpy.abs(analysis_repr)
     #n = length(abscoef);
     n = abscoef.size
     #maxcoef = max(abscoef);
@@ -449,9 +448,10 @@
         qq = maxcoef*greedy_level
 
     #to_be_removed = find(abscoef >= qq);
-    to_be_removed = np.nonzero(abscoef >= qq)
+    # [0] needed because nonzero() returns a tuple of arrays!
+    to_be_removed = numpy.nonzero(abscoef >= qq)[0]
     #return;
-    return to_be_removed, maxcoef
+    return to_be_removed,maxcoef
 
 def check_stopping_criteria(xhat, xinit, maxcoef, lagmult, Lambdahat, params):
 #function r = check_stopping_criteria(xhat, xinit, maxcoef, lagmult, Lambdahat, params)
@@ -465,7 +465,7 @@
         return 1
 
     #if isfield(params, 'stopping_relative_solution_change') && norm(xhat-xinit)/norm(xhat) < params.stopping_relative_solution_change
-    if ('stopping_relative_solution_change' in params) and np.linalg.norm(xhat-xinit)/np.linalg.norm(xhat) < params['stopping_relative_solution_change']:
+    if ('stopping_relative_solution_change' in params) and numpy.linalg.norm(xhat-xinit)/numpy.linalg.norm(xhat) < params['stopping_relative_solution_change']:
         return 1
 
     #if isfield(params, 'stopping_cosparsity') && length(Lambdahat) < params.stopping_cosparsity
--- a/scripts/ABSapprox.py	Wed Nov 09 11:11:39 2011 +0000
+++ b/scripts/ABSapprox.py	Wed Nov 09 18:18:42 2011 +0000
@@ -8,8 +8,13 @@
 import numpy as np
 import scipy.io
 import math
-#import matplotlib.pyplot as plt
-#import matplotlib.cm as cm
+doplot = True
+try: 
+  import matplotlib.pyplot as plt
+except:
+  doplot = False
+if doplot:  
+  import matplotlib.cm as cm
 import pyCSalgos
 import pyCSalgos.GAP.GAP
 import pyCSalgos.SL0.SL0_approx
@@ -39,6 +44,23 @@
   L = 10
   return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
 
+def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+  
+  N,n = Omega.shape
+  #D = np.linalg.pinv(Omega)
+  #U,S,Vt = np.linalg.svd(D)
+  aggDupper = np.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = np.concatenate((aggDupper, lbd * aggDlower))
+  aggy = np.concatenate((y, np.zeros(N-n)))
+  
+  sigmamin = 0.001
+  sigma_decrease_factor = 0.5
+  mu_0 = 2
+  L = 10
+  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+
 # Define tuples (algorithm setup function, algorithm function, name)
 gap = (run_gap, 'GAP')
 sl0 = (run_sl0, 'SL0_approx')
@@ -57,15 +79,15 @@
   #Set up experiment parameters
   d = 50.0;
   sigma = 2.0
-  deltas = np.arange(0.05,0.95,0.05)
-  rhos = np.arange(0.05,0.95,0.05)
-  #deltas = np.array([0.15,0.95])
-  #rhos = np.array([0.15,0.95])
+  #deltas = np.arange(0.05,1.,0.05)
+  #rhos = np.arange(0.05,1.,0.05)
+  deltas = np.array([0.05, 0.45, 0.95])
+  rhos = np.array([0.05, 0.45, 0.95])
   #deltas = np.array([0.05])
   #rhos = np.array([0.05])
   #delta = 0.8;
   #rho   = 0.15;
-  numvects = 20; # Number of vectors to generate
+  numvects = 100; # Number of vectors to generate
   SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
   # Values for lambda
   #lambdas = [0 10.^linspace(-5, 4, 10)];
@@ -121,14 +143,15 @@
       print "Oops, Type Error"
       raise    
   # Show
-  #  for algotuple in algosN:
-  #    plt.figure()
-  #    plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest')
-  #  for algotuple in algosL:
-  #    for ilbd in np.arange(lambdas.size):
-  #      plt.figure()
-  #      plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest')
-  #  plt.show()
+  if doplot:
+    for algotuple in algosN:
+      plt.figure()
+      plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower')
+    for algotuple in algosL:
+      for ilbd in np.arange(lambdas.size):
+        plt.figure()
+        plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower')
+    plt.show()
   print "Finished."
   
 def genData(d,sigma,delta,rho,numvects,SNRdb):
@@ -211,4 +234,7 @@
   
 # Script main
 if __name__ == "__main__":
+
+  #import cProfile
+  #cProfile.run('mainrun()', 'profile')    
   mainrun()
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/ABSapproxMultiproc.py	Wed Nov 09 18:18:42 2011 +0000
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Nov 05 18:08:40 2011
+
+@author: Nic
+"""
+
+import numpy as np
+import scipy.io
+import math
+from multiprocessing import Pool
+doplot = True
+try: 
+  import matplotlib.pyplot as plt
+except:
+  doplot = False
+if doplot:  
+  import matplotlib.cm as cm
+import pyCSalgos
+import pyCSalgos.GAP.GAP
+import pyCSalgos.SL0.SL0_approx
+
+# Define functions that prepare arguments for each algorithm call
+def run_gap(y,M,Omega,epsilon):
+  gapparams = {"num_iteration" : 1000,\
+                   "greedy_level" : 0.9,\
+                   "stopping_coefficient_size" : 1e-4,\
+                   "l2solver" : 'pseudoinverse',\
+                   "noise_level": epsilon}
+  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
+ 
+def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+  
+  N,n = Omega.shape
+  #D = np.linalg.pinv(Omega)
+  #U,S,Vt = np.linalg.svd(D)
+  aggDupper = np.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = np.concatenate((aggDupper, lbd * aggDlower))
+  aggy = np.concatenate((y, np.zeros(N-n)))
+  
+  sigmamin = 0.001
+  sigma_decrease_factor = 0.5
+  mu_0 = 2
+  L = 10
+  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
+  
+  N,n = Omega.shape
+  #D = np.linalg.pinv(Omega)
+  #U,S,Vt = np.linalg.svd(D)
+  aggDupper = np.dot(M,D)
+  aggDlower = Vt[-(N-n):,:]
+  aggD = np.concatenate((aggDupper, lbd * aggDlower))
+  aggy = np.concatenate((y, np.zeros(N-n)))
+  
+  sigmamin = 0.001
+  sigma_decrease_factor = 0.5
+  mu_0 = 2
+  L = 10
+  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+
+# Define tuples (algorithm setup function, algorithm function, name)
+gap = (run_gap, 'GAP')
+sl0 = (run_sl0, 'SL0_approx')
+
+# Define which algorithms to run
+#  1. Algorithms not depending on lambda
+algosN = gap,   # tuple
+#  2. Algorithms depending on lambda (our ABS approach)
+algosL = sl0,   # tuple
+
+def mainrun():
+  
+  nalgosN = len(algosN)  
+  nalgosL = len(algosL)
+  
+  #Set up experiment parameters
+  d = 50.0;
+  sigma = 2.0
+  #deltas = np.arange(0.05,1.,0.05)
+  #rhos = np.arange(0.05,1.,0.05)
+  deltas = np.array([0.05, 0.45, 0.95])
+  rhos = np.array([0.05, 0.45, 0.95])
+  #deltas = np.array([0.05])
+  #rhos = np.array([0.05])
+  #delta = 0.8;
+  #rho   = 0.15;
+  numvects = 100; # Number of vectors to generate
+  SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
+  # Values for lambda
+  #lambdas = [0 10.^linspace(-5, 4, 10)];
+  #lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
+  lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000])
+
+  meanmatrix = dict()
+  for i,algo in zip(np.arange(nalgosN),algosN):
+    meanmatrix[algo[1]]   = np.zeros((rhos.size, deltas.size))
+  for i,algo in zip(np.arange(nalgosL),algosL):
+    meanmatrix[algo[1]]   = np.zeros((lambdas.size, rhos.size, deltas.size))
+  
+  jobparams = []
+  for idelta,delta in zip(np.arange(deltas.size),deltas):
+    for irho,rho in zip(np.arange(rhos.size),rhos):
+      
+      # Generate data and operator
+      Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
+      
+      # Run algorithms
+      print "***** delta = ",delta," rho = ",rho
+      #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
+      jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))
+
+  pool = Pool(4)
+  jobresults = pool.map(runoncetuple,jobparams)
+
+  idx = 0
+  for idelta,delta in zip(np.arange(deltas.size),deltas):
+    for irho,rho in zip(np.arange(rhos.size),rhos):
+      mrelerrN,mrelerrL = jobresults[idx]
+      idx = idx+1
+      for algotuple in algosN: 
+        meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
+        if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
+          meanmatrix[algotuple[1]][irho,idelta] = 0
+      for algotuple in algosL:
+        for ilbd in np.arange(lambdas.size):
+          meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
+          if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
+            meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
+   
+  #  # Prepare matrices to show
+  #  showmats = dict()
+  #  for i,algo in zip(np.arange(nalgosN),algosN):
+  #    showmats[algo[1]]   = np.zeros(rhos.size, deltas.size)
+  #  for i,algo in zip(np.arange(nalgosL),algosL):
+  #    showmats[algo[1]]   = np.zeros(lambdas.size, rhos.size, deltas.size)
+
+    # Save
+    tosave = dict()
+    tosave['meanmatrix'] = meanmatrix
+    tosave['d'] = d
+    tosave['sigma'] = sigma
+    tosave['deltas'] = deltas
+    tosave['rhos'] = rhos
+    tosave['numvects'] = numvects
+    tosave['SNRdb'] = SNRdb
+    tosave['lambdas'] = lambdas
+    try:
+      scipy.io.savemat('ABSapprox.mat',tosave)
+    except TypeError:
+      print "Oops, Type Error"
+      raise    
+  # Show
+  if doplot:
+    for algotuple in algosN:
+      plt.figure()
+      plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest',origin='lower')
+    for algotuple in algosL:
+      for ilbd in np.arange(lambdas.size):
+        plt.figure()
+        plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest',origin='lower')
+    plt.show()
+  print "Finished."
+  
+def genData(d,sigma,delta,rho,numvects,SNRdb):
+
+  # Process parameters
+  noiselevel = 1.0 / (10.0**(SNRdb/10.0));
+  p = round(sigma*d);
+  m = round(delta*d);
+  l = round(d - rho*m);
+  
+  # Generate Omega and data based on parameters
+  Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
+  # Optionally make Omega more coherent
+  U,S,Vt = np.linalg.svd(Omega);
+  Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega!
+  Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
+  Omega = np.dot(U , np.dot(Snew,Vt))
+
+  # Generate data  
+  x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
+  
+  return Omega,x0,y,M,realnoise
+
+def runoncetuple(t):
+  return runonce(*t)
+
+def runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0):
+  
+  d = Omega.shape[1]  
+  
+  nalgosN = len(algosN)  
+  nalgosL = len(algosL)
+  
+  xrec = dict()
+  err = dict()
+  relerr = dict()
+
+  # Prepare storage variables for algorithms non-Lambda
+  for i,algo in zip(np.arange(nalgosN),algosN):
+    xrec[algo[1]]   = np.zeros((d, y.shape[1]))
+    err[algo[1]]    = np.zeros(y.shape[1])
+    relerr[algo[1]] = np.zeros(y.shape[1])
+  # Prepare storage variables for algorithms with Lambda    
+  for i,algo in zip(np.arange(nalgosL),algosL):
+    xrec[algo[1]]   = np.zeros((lambdas.size, d, y.shape[1]))
+    err[algo[1]]    = np.zeros((lambdas.size, y.shape[1]))
+    relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
+  
+  # Run algorithms non-Lambda
+  for iy in np.arange(y.shape[1]):
+    for algofunc,strname in algosN:
+      epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+      xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
+      err[strname][iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
+      relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
+  for algotuple in algosN:
+    print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])  
+
+  # Run algorithms with Lambda
+  for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
+    for iy in np.arange(y.shape[1]):
+      D = np.linalg.pinv(Omega)
+      U,S,Vt = np.linalg.svd(D)
+      for algofunc,strname in algosL:
+        epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+        gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
+        xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
+        err[strname][ilbd,iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
+        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
+    print 'Lambda = ',lbd,' :'
+    for algotuple in algosL:
+      print '   ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
+
+  # Prepare results
+  mrelerrN = dict()
+  for algotuple in algosN:
+    mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
+  mrelerrL = dict()
+  for algotuple in algosL:
+    mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1)
+  
+  return mrelerrN,mrelerrL
+  
+# Script main
+if __name__ == "__main__":
+  #import cProfile
+  #cProfile.run('mainrun()', 'profile')    
+
+  mainrun()
--- a/scripts/ABSapproxPP.py	Wed Nov 09 11:11:39 2011 +0000
+++ b/scripts/ABSapproxPP.py	Wed Nov 09 18:18:42 2011 +0000
@@ -5,11 +5,11 @@
 @author: Nic
 """
 
-import numpy as np
+import numpy
 import scipy.io
 import math
-import matplotlib.pyplot as plt
-import matplotlib.cm as cm
+#import matplotlib.pyplot as plt
+#import matplotlib.cm as cm
 import pp
 import pyCSalgos
 import pyCSalgos.GAP.GAP
@@ -22,17 +22,17 @@
                    "stopping_coefficient_size" : 1e-4,\
                    "l2solver" : 'pseudoinverse',\
                    "noise_level": epsilon}
-  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
+  return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,numpy.zeros(Omega.shape[1]))[0]
  
 def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
   
   N,n = Omega.shape
-  #D = np.linalg.pinv(Omega)
-  #U,S,Vt = np.linalg.svd(D)
-  aggDupper = np.dot(M,D)
+  #D = numpy.linalg.pinv(Omega)
+  #U,S,Vt = numpy.linalg.svd(D)
+  aggDupper = numpy.dot(M,D)
   aggDlower = Vt[-(N-n):,:]
-  aggD = np.concatenate((aggDupper, lbd * aggDlower))
-  aggy = np.concatenate((y, np.zeros(N-n)))
+  aggD = numpy.concatenate((aggDupper, lbd * aggDlower))
+  aggy = numpy.concatenate((y, numpy.zeros(N-n)))
   
   sigmamin = 0.001
   sigma_decrease_factor = 0.5
@@ -58,32 +58,32 @@
   #Set up experiment parameters
   d = 50;
   sigma = 2.0
-  #deltas = np.arange(0.05,0.95,0.05)
-  #rhos = np.arange(0.05,0.95,0.05)
-  deltas = np.array([0.05,0.95])
-  rhos = np.array([0.05,0.95])
-  #deltas = np.array([0.05])
-  #rhos = np.array([0.05])
+  #deltas = numpy.arange(0.05,0.95,0.05)
+  #rhos = numpy.arange(0.05,0.95,0.05)
+  deltas = numpy.array([0.05, 0.45, 0.95])
+  rhos = numpy.array([0.05, 0.45, 0.95])
+  #deltas = numpy.array([0.05])
+  #rhos = numpy.array([0.05])
   #delta = 0.8;
   #rho   = 0.15;
   numvects = 10; # Number of vectors to generate
   SNRdb = 20.;    # This is norm(signal)/norm(noise), so power, not energy
   # Values for lambda
   #lambdas = [0 10.^linspace(-5, 4, 10)];
-  lambdas = np.concatenate((np.array([0]), 10**np.linspace(-5, 4, 10)))
+  lambdas = numpy.concatenate((numpy.array([0]), 10**numpy.linspace(-5, 4, 10)))
 
   meanmatrix = dict()
-  for i,algo in zip(np.arange(nalgosN),algosN):
-    meanmatrix[algo[1]]   = np.zeros((rhos.size, deltas.size))
-  for i,algo in zip(np.arange(nalgosL),algosL):
-    meanmatrix[algo[1]]   = np.zeros((lambdas.size, rhos.size, deltas.size))
+  for i,algo in zip(numpy.arange(nalgosN),algosN):
+    meanmatrix[algo[1]]   = numpy.zeros((rhos.size, deltas.size))
+  for i,algo in zip(numpy.arange(nalgosL),algosL):
+    meanmatrix[algo[1]]   = numpy.zeros((lambdas.size, rhos.size, deltas.size))
 
   # PP: start job server  
-  job_server = pp.Server(ncpus = 1) 
+  job_server = pp.Server(ncpus = 4) 
   idx = 0
   jobparams = []
-  for idelta,delta in zip(np.arange(deltas.size),deltas):
-    for irho,rho in zip(np.arange(rhos.size),rhos):
+  for idelta,delta in zip(numpy.arange(deltas.size),deltas):
+    for irho,rho in zip(numpy.arange(rhos.size),rhos):
       
       # Generate data and operator
       Omega,x0,y,M,realnoise = genData(d,sigma,delta,rho,numvects,SNRdb)
@@ -93,21 +93,24 @@
       idx = idx + 1
       
   # Run algorithms
-  jobs = [job_server.submit(runonce, jobparam, (run_gap,run_sl0), ('numpy',)) for jobparam in jobparams]
+  modules = ('numpy','pyCSalgos','pyCSalgos.GAP.GAP','pyCSalgos.SL0.SL0_approx')
+  depfuncs = ()
+  jobs = [job_server.submit(runonce, jobparam, (run_gap,run_sl0), modules, depfuncs) for jobparam in jobparams]
   #funcarray[idelta,irho] = job_server.submit(runonce,(algosN,algosL, Omega,y,lambdas,realnoise,M,x0), (run_gap,run_sl0))
       #mrelerrN,mrelerrL = runonce(algosN,algosL,Omega,y,lambdas,realnoise,M,x0)
 
   # Get data from jobs
   idx = 0
-  for idelta,delta in zip(np.arange(deltas.size),deltas):
-    for irho,rho in zip(np.arange(rhos.size),rhos):
+  for idelta,delta in zip(numpy.arange(deltas.size),deltas):
+    for irho,rho in zip(numpy.arange(rhos.size),rhos):
+      print "***** delta = ",delta," rho = ",rho
       mrelerrN,mrelerrL = jobs[idx]()
       for algotuple in algosN: 
         meanmatrix[algotuple[1]][irho,idelta] = 1 - mrelerrN[algotuple[1]]
         if meanmatrix[algotuple[1]][irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][irho,idelta]):
           meanmatrix[algotuple[1]][irho,idelta] = 0
       for algotuple in algosL:
-        for ilbd in np.arange(lambdas.size):
+        for ilbd in numpy.arange(lambdas.size):
           meanmatrix[algotuple[1]][ilbd,irho,idelta] = 1 - mrelerrL[algotuple[1]][ilbd]
           if meanmatrix[algotuple[1]][ilbd,irho,idelta] < 0 or math.isnan(meanmatrix[algotuple[1]][ilbd,irho,idelta]):
             meanmatrix[algotuple[1]][ilbd,irho,idelta] = 0
@@ -115,10 +118,10 @@
    
   #  # Prepare matrices to show
   #  showmats = dict()
-  #  for i,algo in zip(np.arange(nalgosN),algosN):
-  #    showmats[algo[1]]   = np.zeros(rhos.size, deltas.size)
-  #  for i,algo in zip(np.arange(nalgosL),algosL):
-  #    showmats[algo[1]]   = np.zeros(lambdas.size, rhos.size, deltas.size)
+  #  for i,algo in zip(numpy.arange(nalgosN),algosN):
+  #    showmats[algo[1]]   = numpy.zeros(rhos.size, deltas.size)
+  #  for i,algo in zip(numpy.arange(nalgosL),algosL):
+  #    showmats[algo[1]]   = numpy.zeros(lambdas.size, rhos.size, deltas.size)
 
     # Save
     tosave = dict()
@@ -136,14 +139,14 @@
       print "Oops, Type Error"
       raise    
   # Show
-  for algotuple in algosN:
-    plt.figure()
-    plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest')
-  for algotuple in algosL:
-    for ilbd in np.arange(lambdas.size):
-      plt.figure()
-      plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest')
-  plt.show()
+  #  for algotuple in algosN:
+  #    plt.figure()
+  #    plt.imshow(meanmatrix[algotuple[1]], cmap=cm.gray, interpolation='nearest')
+  #  for algotuple in algosL:
+  #    for ilbd in numpy.arange(lambdas.size):
+  #      plt.figure()
+  #      plt.imshow(meanmatrix[algotuple[1]][ilbd], cmap=cm.gray, interpolation='nearest')
+  #  plt.show()
   print "Finished."
   
 def genData(d,sigma,delta,rho,numvects,SNRdb):
@@ -157,10 +160,10 @@
   # Generate Omega and data based on parameters
   Omega = pyCSalgos.GAP.GAP.Generate_Analysis_Operator(d, p);
   # Optionally make Omega more coherent
-  U,S,Vt = np.linalg.svd(Omega);
-  Sdnew = S * (1+np.arange(S.size)) # Make D coherent, not Omega!
-  Snew = np.vstack((np.diag(Sdnew), np.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
-  Omega = np.dot(U , np.dot(Snew,Vt))
+  U,S,Vt = numpy.linalg.svd(Omega);
+  Sdnew = S * (1+numpy.arange(S.size)) # Make D coherent, not Omega!
+  Snew = numpy.vstack((numpy.diag(Sdnew), numpy.zeros((Omega.shape[0] - Omega.shape[1], Omega.shape[1]))))
+  Omega = numpy.dot(U , numpy.dot(Snew,Vt))
 
   # Generate data  
   x0,y,M,Lambda,realnoise = pyCSalgos.GAP.GAP.Generate_Data_Known_Omega(Omega, d,p,m,l,noiselevel, numvects,'l0');
@@ -179,48 +182,48 @@
   relerr = dict()
 
   # Prepare storage variables for algorithms non-Lambda
-  for i,algo in zip(np.arange(nalgosN),algosN):
-    xrec[algo[1]]   = np.zeros((d, y.shape[1]))
-    err[algo[1]]    = np.zeros(y.shape[1])
-    relerr[algo[1]] = np.zeros(y.shape[1])
+  for i,algo in zip(numpy.arange(nalgosN),algosN):
+    xrec[algo[1]]   = numpy.zeros((d, y.shape[1]))
+    err[algo[1]]    = numpy.zeros(y.shape[1])
+    relerr[algo[1]] = numpy.zeros(y.shape[1])
   # Prepare storage variables for algorithms with Lambda    
-  for i,algo in zip(np.arange(nalgosL),algosL):
-    xrec[algo[1]]   = np.zeros((lambdas.size, d, y.shape[1]))
-    err[algo[1]]    = np.zeros((lambdas.size, y.shape[1]))
-    relerr[algo[1]] = np.zeros((lambdas.size, y.shape[1]))
+  for i,algo in zip(numpy.arange(nalgosL),algosL):
+    xrec[algo[1]]   = numpy.zeros((lambdas.size, d, y.shape[1]))
+    err[algo[1]]    = numpy.zeros((lambdas.size, y.shape[1]))
+    relerr[algo[1]] = numpy.zeros((lambdas.size, y.shape[1]))
   
   # Run algorithms non-Lambda
-  for iy in np.arange(y.shape[1]):
+  for iy in numpy.arange(y.shape[1]):
     for algofunc,strname in algosN:
-      epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+      epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
       xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
-      err[strname][iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
-      relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
+      err[strname][iy]    = numpy.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
+      relerr[strname][iy] = err[strname][iy] / numpy.linalg.norm(x0[:,iy])
   for algotuple in algosN:
-    print algotuple[1],' : avg relative error = ',np.mean(relerr[strname])  
+    print algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname])  
 
   # Run algorithms with Lambda
-  for ilbd,lbd in zip(np.arange(lambdas.size),lambdas):
-    for iy in np.arange(y.shape[1]):
-      D = np.linalg.pinv(Omega)
-      U,S,Vt = np.linalg.svd(D)
+  for ilbd,lbd in zip(numpy.arange(lambdas.size),lambdas):
+    for iy in numpy.arange(y.shape[1]):
+      D = numpy.linalg.pinv(Omega)
+      U,S,Vt = numpy.linalg.svd(D)
       for algofunc,strname in algosL:
-        epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
+        epsilon = 1.1 * numpy.linalg.norm(realnoise[:,iy])
         gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
-        xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
-        err[strname][ilbd,iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
-        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
+        xrec[strname][ilbd,:,iy] = numpy.dot(D,gamma)
+        err[strname][ilbd,iy]    = numpy.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
+        relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / numpy.linalg.norm(x0[:,iy])
     print 'Lambda = ',lbd,' :'
     for algotuple in algosL:
-      print '   ',algotuple[1],' : avg relative error = ',np.mean(relerr[strname][ilbd,:])
+      print '   ',algotuple[1],' : avg relative error = ',numpy.mean(relerr[strname][ilbd,:])
 
   # Prepare results
   mrelerrN = dict()
   for algotuple in algosN:
-    mrelerrN[algotuple[1]] = np.mean(relerr[algotuple[1]])
+    mrelerrN[algotuple[1]] = numpy.mean(relerr[algotuple[1]])
   mrelerrL = dict()
   for algotuple in algosL:
-    mrelerrL[algotuple[1]] = np.mean(relerr[algotuple[1]],1)
+    mrelerrL[algotuple[1]] = numpy.mean(relerr[algotuple[1]],1)
   
   return mrelerrN,mrelerrL
   
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/utils.py	Wed Nov 09 18:18:42 2011 +0000
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Nov 09 12:28:55 2011
+
+@author: ncleju
+"""
+
+import scipy.io
+import matplotlib.pyplot as plt
+import matplotlib.cm as cm
+
+def loadshowmatrices(filename, algostr = ('GAP','SL0_approx')):
+    mdict = scipy.io.loadmat(filename)
+    for strname in algostr:
+        print strname
+        if mdict['meanmatrix'][strname][0,0].ndim == 2:
+            plt.figure()
+            plt.imshow(mdict['meanmatrix'][strname][0,0], cmap=cm.gray, interpolation='nearest',origin='lower')            
+        elif mdict['meanmatrix'][strname][0,0].ndim == 3:
+            for matrix in mdict['meanmatrix'][strname][0,0]:
+                plt.figure()
+                plt.imshow(matrix, cmap=cm.gray, interpolation='nearest',origin='lower')
+    plt.show()
+    print "Finished."
+