changeset 37:afcfd4d1d548

17.11.2011 Lots of stuff:
- Implemented l1qec(): a variant of l1 minimization with both quadratic and equality constraints (no ABS, no lambda)
- Implemented SL0a2(): a variant of SL0a approximate recovery with both quadratic and equality constraints (no ABS, no lambda)
- Fixed a huge bug: run_bp() was running SL0 instead of BP
author nikcleju
date Thu, 17 Nov 2011 17:29:54 +0000
parents 539b21849166
children aa3e89435a2a
files pyCSalgos/BP/l1qc.py pyCSalgos/BP/l1qec.py pyCSalgos/SL0/SL0_approx.py scripts/ABSapprox.py
diffstat 4 files changed, 595 insertions(+), 42 deletions(-)
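
For context, a minimal usage sketch of the new l1qec_logbarrier() solver introduced below. The call signature is taken from pyCSalgos/BP/l1qec.py in this changeset; the sizes and the random test data are made up for illustration only, and epsilon must be strictly positive or l1qecInputValueError is raised.

import numpy as np
import pyCSalgos.BP.l1qec as l1qec

np.random.seed(0)
m, k, N = 20, 10, 40                     # made-up sizes: measurements, equality rows, unknowns
Aeps   = np.random.randn(m, N)           # quadratic constraint:  ||Aeps*gamma - b||_2 <= epsilon
Aexact = np.random.randn(k, N)           # equality constraint:   Aexact*gamma = bexact
gamma_true = np.zeros(N)
gamma_true[:5] = np.random.randn(5)      # a sparse test vector
b      = np.dot(Aeps, gamma_true)
bexact = np.dot(Aexact, gamma_true)

x0 = np.zeros(N)                         # initial point (replaced by a feasible point internally)
gamma = l1qec.l1qec_logbarrier(x0, Aeps, Aeps.T, b, 1e-3,
                               Aexact, Aexact.T, bexact)
print 'quadratic residual:', np.linalg.norm(np.dot(Aeps, gamma) - b)
print 'equality residual: ', np.linalg.norm(np.dot(Aexact, gamma) - bexact)
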
--- a/pyCSalgos/BP/l1qc.py	Tue Nov 15 15:11:56 2011 +0000
+++ b/pyCSalgos/BP/l1qc.py	Thu Nov 17 17:29:54 2011 +0000
@@ -3,6 +3,8 @@
 import scipy.linalg
 import math
 
+class l1qcInputValueError(Exception):
+  pass
 
 #function [x, res, iter] = cgsolve(A, b, tol, maxiter, verbose)
 def cgsolve(A, b, tol, maxiter, verbose=1):
@@ -155,7 +157,7 @@
 
 
 #function [xp, up, niter] = l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter) 
-def l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter):
+def l1qc_newton(x0, u0, A, At, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter, verbose=False):
     # Newton algorithm for log-barrier subproblems for l1 minimization
     # with quadratic constraints.
     #
@@ -364,7 +366,8 @@
         h11pfun = lambda z: sigx*z - (1.0/fe)*At(A(z)) + 1.0/(fe**2)*np.dot(np.dot(atr.T,z),atr)
         dx,cgres,cgiter = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0)
         if (cgres > 1.0/2):
-          print 'Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+          if verbose:
+            print 'Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)'
           xp = x.copy()
           up = u.copy()
           return xp,up,niter
@@ -386,12 +389,14 @@
             #dx = np.linalg.solve(H11p, w1p)
             hcond = 1.0/np.linalg.cond(H11p)
         except scipy.linalg.LinAlgError:
-            print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+            if verbose:
+              print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
             xp = x.copy()
             up = u.copy()
             return xp,up,niter
         if hcond < 1e-14:
-            print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+            if verbose:
+              print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
             xp = x.copy()
             up = u.copy()
             return xp,up,niter
@@ -443,7 +448,8 @@
         s = beta*s
         backiter = backiter + 1
         if (backiter > 32):
-          print 'Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)'
+          if verbose:
+            print 'Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)'
           xp = x.copy()
           up = u.copy()
           return xp,up,niter
@@ -473,19 +479,21 @@
           done = 0
       
       #disp(sprintf('Newton iter = #d, Functional = #8.3f, Newton decrement = #8.3f, Stepsize = #8.3e', ...
-      print 'Newton iter = ',niter,', Functional = ',f,', Newton decrement = ',lambda2/2.0,', Stepsize = ',stepsize
+      if verbose:
+        print 'Newton iter = ',niter,', Functional = ',f,', Newton decrement = ',lambda2/2.0,', Stepsize = ',stepsize
 
-      if largescale:
-        print '                CG Res = ',cgres,', CG Iter = ',cgiter
-      else:
-        print '                  H11p condition number = ',hcond
+      if verbose:
+        if largescale:
+          print '                CG Res = ',cgres,', CG Iter = ',cgiter
+        else:
+          print '                  H11p condition number = ',hcond
       #end
           
     #end
     return xp,up,niter
 
 #function xp = l1qc_logbarrier(x0, A, At, b, epsilon, lbtol, mu, cgtol, cgmaxiter)
-def l1qc_logbarrier(x0, A, At, b, epsilon, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200):
+def l1qc_logbarrier(x0, A, At, b, epsilon, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200, verbose=False):
     # Solve quadratically constrained l1 minimization:
     # min ||x||_1   s.t.  ||Ax - b||_2 <= \epsilon
     #
@@ -609,6 +617,10 @@
     # End of original Matlab code
     #----------------------------
     
+    # Check that epsilon > 0. If epsilon is 0 the algorithm fails; use the equality-constrained solver instead
+    if epsilon == 0:
+      raise l1qcInputValueError('Epsilon should be > 0!')       
+    
     #largescale = isa(A,'function_handle');
     if hasattr(A, '__call__'):
         largescale = True
@@ -630,13 +642,15 @@
     # starting point --- make sure that it is feasible
     if largescale:
       if np.linalg.norm(A(x0) - b) > epsilon:
-        print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
+        if verbose:
+          print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
         #AAt = @(z) A(At(z));
         AAt = lambda z: A(At(z))
         # TODO: implement cgsolve
         w,cgres,cgiter = cgsolve(AAt, b, cgtol, cgmaxiter, 0)
         if (cgres > 1.0/2):
-          print 'A*At is ill-conditioned: cannot find starting point'
+          if verbose:
+            print 'A*At is ill-conditioned: cannot find starting point'
           xp = x0.copy()
           return xp
         #end
@@ -644,7 +658,8 @@
       #end
     else:
       if np.linalg.norm( np.dot(A,x0) - b ) > epsilon:
-        print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
+        if verbose:
+          print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
         #opts.POSDEF = true; opts.SYM = true;
         #[w, hcond] = linsolve(A*A', b, opts);
         #if (hcond < 1e-14)
@@ -655,13 +670,15 @@
         try:
             w = scipy.linalg.solve(np.dot(A,A.T), b, sym_pos=True)
             #w = np.linalg.solve(np.dot(A,A.T), b)
-            hcond = 1.0/scipy.linalg.cond(np.dot(A,A.T))
+            hcond = 1.0/np.linalg.cond(np.dot(A,A.T))
         except scipy.linalg.LinAlgError:
-            print 'A*At is ill-conditioned: cannot find starting point'
+            if verbose:
+              print 'A*At is ill-conditioned: cannot find starting point'
             xp = x0.copy()
             return xp
         if hcond < 1e-14:
-            print 'A*At is ill-conditioned: cannot find starting point'
+            if verbose:
+              print 'A*At is ill-conditioned: cannot find starting point'
             xp = x0.copy()
             return xp           
         #x0 = A'*w;
@@ -672,7 +689,8 @@
     u = (0.95)*np.abs(x0) + (0.10)*np.abs(x0).max()
     
     #disp(sprintf('Original l1 norm = #.3f, original functional = #.3f', sum(abs(x0)), sum(u)));
-    print 'Original l1 norm = ',np.abs(x0).sum(),'original functional = ',u.sum()
+    if verbose:
+      print 'Original l1 norm = ',np.abs(x0).sum(),'original functional = ',u.sum()
     
     # choose initial value of tau so that the duality gap after the first
     # step will be about the original norm
@@ -680,7 +698,8 @@
                                                                                                                               
     lbiter = math.ceil((math.log(2*N+1)-math.log(lbtol)-math.log(tau))/math.log(mu))
     #disp(sprintf('Number of log barrier iterations = #d\n', lbiter));
-    print 'Number of log barrier iterations = ',lbiter
+    if verbose:
+      print 'Number of log barrier iterations = ',lbiter
     
     totaliter = 0
     
@@ -697,7 +716,8 @@
       
       #disp(sprintf('\nLog barrier iter = #d, l1 = #.3f, functional = #8.3f, tau = #8.3e, total newton iter = #d\n', ...
       #  ii, sum(abs(xp)), sum(up), tau, totaliter));
-      print 'Log barrier iter = ',ii,', l1 = ',np.abs(xp).sum(),', functional = ',up.sum(),', tau = ',tau,', total newton iter = ',totaliter
+      if verbose:
+        print 'Log barrier iter = ',ii,', l1 = ',np.abs(xp).sum(),', functional = ',up.sum(),', tau = ',tau,', total newton iter = ',totaliter
       x = xp.copy()
       u = up.copy()
      
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyCSalgos/BP/l1qec.py	Thu Nov 17 17:29:54 2011 +0000
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Nov 17 15:47:36 2011
+
+Solve l1 minimization with quadratic AND equality constraints
+
+@author: ncleju
+"""
+
+
+import numpy as np
+import scipy.linalg
+import math
+
+class l1qecInputValueError(Exception):
+  pass
+
+# This function is normally not used, so it is untested and probably does not work
+def cgsolve(A, b, tol, maxiter, verbose=1):
+    raise Exception('Shouldn\'t run cgsolve(), as this is absolutely not tested. Comment this if you really want to proceed.')
+  
+    
+    #if (nargin < 5), verbose = 1; end
+    # Optional argument
+    
+    #implicit = isa(A,'function_handle');
+    if hasattr(A, '__call__'):
+        implicit = True
+    else:
+        implicit = False
+    
+    x = np.zeros(b.size)
+    r = b.copy()
+    d = r.copy()
+    delta = np.vdot(r,r)
+    delta0 = np.vdot(b,b)
+    numiter = 0
+    bestx = x.copy()
+    bestres = math.sqrt(delta/delta0)
+    while (numiter < maxiter) and (delta > tol**2*delta0):
+    
+      # q = A*d
+      #if (implicit), q = A(d);  else  q = A*d;  end
+      if implicit:
+          q = A(d)
+      else:
+          q = np.dot(A,d)
+     
+      alpha = delta/np.vdot(d,q)
+      x = x + alpha*d
+      
+      if divmod(numiter+1,50)[1] == 0:
+        # r = b - Aux*x
+        #if (implicit), r = b - A(x);  else  r = b - A*x;  end
+        if implicit:
+            r = b - A(x)
+        else:
+            r = b - np.dot(A,x)
+      else:
+        r = r - alpha*q
+      #end
+      
+      deltaold = delta
+      delta = np.vdot(r,r)
+      beta = delta/deltaold
+      d = r + beta*d
+      numiter = numiter + 1
+      if (math.sqrt(delta/delta0) < bestres):
+        bestx = x.copy()
+        bestres = math.sqrt(delta/delta0)
+      #end    
+      
+      if ((verbose) and (divmod(numiter,verbose)[1]==0)):
+        #disp(sprintf('cg: Iter = #d, Best residual = #8.3e, Current residual = #8.3e', ...
+        #  numiter, bestres, sqrt(delta/delta0)));
+        print 'cg: Iter = ',numiter,', Best residual = ',bestres,', Current residual = ',math.sqrt(delta/delta0)
+      #end
+      
+    #end
+    
+    if (verbose):
+      #disp(sprintf('cg: Iterations = #d, best residual = #14.8e', numiter, bestres));
+      print 'cg: Iterations = ',numiter,', best residual = ',bestres
+    #end
+    x = bestx.copy()
+    res = bestres
+    iter = numiter
+    
+    return x,res,iter
+
+
+
+def l1qec_newton(x0, u0, A, At, b, epsilon, Aexact, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter, verbose=False):
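+    # Newton solver for the log-barrier subproblems of l1 minimization with quadratic
+    # constraints; additionally keeps Aexact*x fixed via a bordered (KKT) system (cf. tveq_newton.m)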
+
+    # check if the matrix A is implicit or explicit
+    #largescale = isa(A,'function_handle');
+    if hasattr(A, '__call__'):
+        largescale = True
+    else:
+        largescale = False    
+    
+    # line search parameters
+    alpha = 0.01
+    beta = 0.5
+    
+    #if (~largescale), AtA = A'*A; end
+    if not largescale:
+        AtA = np.dot(A.T,A)
+    
+    # initial point
+    x = x0.copy()
+    u = u0.copy()
+    #if (largescale), r = A(x) - b; else  r = A*x - b; end
+    if largescale:
+        r = A(x) - b
+    else:
+        r = np.dot(A,x) - b
+        
+    fu1 = x - u
+    fu2 = -x - u
+    fe = 1.0/2*(np.vdot(r,r) - epsilon**2)
+    f = u.sum() - (1.0/tau)*(np.log(-fu1).sum() + np.log(-fu2).sum() + math.log(-fe))
+    
+    niter = 0
+    done = 0
+    while not done:
+      
+      #if (largescale), atr = At(r); else  atr = A'*r; end
+      if largescale:
+          atr = At(r)
+      else:
+          atr = np.dot(A.T,r)
+      
+      #ntgz = 1./fu1 - 1./fu2 + 1/fe*atr;
+      ntgz = 1.0/fu1 - 1.0/fu2 + 1.0/fe*atr
+      #ntgu = -tau - 1./fu1 - 1./fu2;
+      ntgu = -tau - 1.0/fu1 - 1.0/fu2
+      #gradf = -(1/tau)*[ntgz; ntgu];
+      gradf = -(1.0/tau)*np.concatenate((ntgz, ntgu),0)
+      
+      #sig11 = 1./fu1.^2 + 1./fu2.^2;
+      sig11 = 1.0/(fu1**2) + 1.0/(fu2**2)
+      #sig12 = -1./fu1.^2 + 1./fu2.^2;
+      sig12 = -1.0/(fu1**2) + 1.0/(fu2**2)
+      #sigx = sig11 - sig12.^2./sig11;
+      sigx = sig11 - (sig12**2)/sig11
+        
+      #w1p = ntgz - sig12./sig11.*ntgu;
+      w1p = ntgz - sig12/sig11*ntgu
+      if largescale:
+        #h11pfun = @(z) sigx.*z - (1/fe)*At(A(z)) + 1/fe^2*(atr'*z)*atr;
+        h11pfun = lambda z: sigx*z - (1.0/fe)*At(A(z)) + 1.0/(fe**2)*np.dot(np.dot(atr.T,z),atr)
+        dx,cgres,cgiter = cgsolve(h11pfun, w1p, cgtol, cgmaxiter, 0)
+        if (cgres > 1.0/2):
+          if verbose:
+            print 'Cannot solve system.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+          xp = x.copy()
+          up = u.copy()
+          return xp,up,niter
+        #end
+        Adx = A(dx)
+      else:
+        #H11p = diag(sigx) - (1/fe)*AtA + (1/fe)^2*atr*atr';
+        # Attention: atr is column vector, so atr*atr' means outer(atr,atr)
+        H11p = np.diag(sigx) - (1.0/fe)*AtA + (1.0/fe)**2*np.outer(atr,atr)
+        #opts.POSDEF = true; opts.SYM = true;
+        #[dx,hcond] = linsolve(H11p, w1p, opts);
+        #if (hcond < 1e-14)
+        #  disp('Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)');
+        #  xp = x;  up = u;
+        #  return
+        #end
+        
+        # Nic says: from tveq_newton.m
+        K = Aexact.shape[0]
+        afac = (np.diag(H11p)).max()
+        #Hp = [H11p afac*A'; afac*A zeros(K)])
+        Hp = np.vstack(( np.hstack((H11p,afac*Aexact.T)) , np.hstack((afac*Aexact,np.zeros((K,K)))) ))
+        wp = np.concatenate((w1p , np.zeros(K)))
+        try:
+            #dx = scipy.linalg.solve(H11p, w1p, sym_pos=True)
+            #hcond = 1.0/np.linalg.cond(H11p)
+            dxv = scipy.linalg.solve(Hp, wp, sym_pos=False) # Only symmetric, not posdef
+            dx = dxv[:x0.size]
+            hcond = 1.0/np.linalg.cond(Hp)
+        except scipy.linalg.LinAlgError:
+            if verbose:
+              print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+            xp = x.copy()
+            up = u.copy()
+            return xp,up,niter
+        if hcond < 1e-14:
+            if verbose:
+              print 'Matrix ill-conditioned.  Returning previous iterate.  (See Section 4 of notes for more information.)'
+            xp = x.copy()
+            up = u.copy()
+            return xp,up,niter
+        
+        #Adx = A*dx;
+        Adx = np.dot(A,dx)
+      #end
+      #du = (1./sig11).*ntgu - (sig12./sig11).*dx;  
+      du = (1.0/sig11)*ntgu - (sig12/sig11)*dx
+     
+      # minimum step size that stays in the interior
+      #ifu1 = find((dx-du) > 0); ifu2 = find((-dx-du) > 0);
+      ifu1 = np.nonzero((dx-du)>0)
+      ifu2 = np.nonzero((-dx-du)>0)
+      #aqe = Adx'*Adx;   bqe = 2*r'*Adx;   cqe = r'*r - epsilon^2;
+      aqe = np.dot(Adx.T,Adx)
+      bqe = 2*np.dot(r.T,Adx)
+      cqe = np.vdot(r,r) - epsilon**2
+      #smax = min(1,min([...
+      #  -fu1(ifu1)./(dx(ifu1)-du(ifu1)); -fu2(ifu2)./(-dx(ifu2)-du(ifu2)); ...
+      #  (-bqe+sqrt(bqe^2-4*aqe*cqe))/(2*aqe)
+      #  ]));
+      smax = min(1,np.concatenate( (-fu1[ifu1]/(dx[ifu1]-du[ifu1]) , -fu2[ifu2]/(-dx[ifu2]-du[ifu2]) , np.array([ (-bqe + math.sqrt(bqe**2-4*aqe*cqe))/(2*aqe) ]) ) , 0).min())
+      
+      s = 0.99 * smax
+      
+      # backtracking line search
+      suffdec = 0
+      backiter = 0
+      while not suffdec:
+        #xp = x + s*dx;  up = u + s*du;  rp = r + s*Adx;
+        xp = x + s*dx
+        up = u + s*du
+        rp = r + s*Adx
+        #fu1p = xp - up;  fu2p = -xp - up;  fep = 1/2*(rp'*rp - epsilon^2);
+        fu1p = xp - up
+        fu2p = -xp - up
+        fep = 0.5*(np.vdot(rp,rp) - epsilon**2)
+        #fp = sum(up) - (1/tau)*(sum(log(-fu1p)) + sum(log(-fu2p)) + log(-fep));
+        fp = up.sum() - (1.0/tau)*(np.log(-fu1p).sum() + np.log(-fu2p).sum() + math.log(-fep))
+        #flin = f + alpha*s*(gradf'*[dx; du]);
+        flin = f + alpha*s*np.dot(gradf.T , np.concatenate((dx,du),0))
+        #suffdec = (fp <= flin);
+        if fp <= flin:
+            suffdec = True
+        else:
+            suffdec = False
+        
+        s = beta*s
+        backiter = backiter + 1
+        if (backiter > 32):
+          if verbose:
+            print 'Stuck on backtracking line search, returning previous iterate.  (See Section 4 of notes for more information.)'
+          xp = x.copy()
+          up = u.copy()
+          return xp,up,niter
+        #end
+      #end
+      
+      # set up for next iteration
+      #x = xp; u = up;  r = rp;
+      x = xp.copy()
+      u = up.copy()
+      r = rp.copy()
+      #fu1 = fu1p;  fu2 = fu2p;  fe = fep;  f = fp;
+      fu1 = fu1p.copy()
+      fu2 = fu2p.copy()
+      fe = fep
+      f = fp
+      
+      #lambda2 = -(gradf'*[dx; du]);
+      lambda2 = -np.dot(gradf.T , np.concatenate((dx,du),0))
+      #stepsize = s*norm([dx; du]);
+      stepsize = s * np.linalg.norm(np.concatenate((dx,du),0))
+      niter = niter + 1
+      #done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter);
+      if lambda2/2.0 < newtontol or niter >= newtonmaxiter:
+          done = 1
+      else:
+          done = 0
+      
+      #disp(sprintf('Newton iter = #d, Functional = #8.3f, Newton decrement = #8.3f, Stepsize = #8.3e', ...
+      if verbose:
+        print 'Newton iter = ',niter,', Functional = ',f,', Newton decrement = ',lambda2/2.0,', Stepsize = ',stepsize
+
+      if verbose:
+        if largescale:
+            print '                CG Res = ',cgres,', CG Iter = ',cgiter
+        else:
+            print '                  H11p condition number = ',hcond
+      #end
+          
+    #end
+    return xp,up,niter
+
+def l1qec_logbarrier(x0, A, At, b, epsilon, Aexact, Atexact, bexact, lbtol=1e-3, mu=10, cgtol=1e-8, cgmaxiter=200, verbose=False):
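+    # Solve l1 minimization with quadratic and equality constraints:
+    #   min ||x||_1   s.t.  ||A*x - b||_2 <= epsilon  and  Aexact*x = bexact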
+    
+    # Check that epsilon > 0. If epsilon is 0 the algorithm fails; use the equality-constrained solver instead
+    if epsilon == 0:
+      raise l1qecInputValueError('Epsilon should be > 0!')      
+    
+    #largescale = isa(A,'function_handle');
+    if hasattr(A, '__call__'):
+        largescale = True
+    else:
+        largescale = False
+    
+    #    if (nargin < 6), lbtol = 1e-3; end
+    #    if (nargin < 7), mu = 10; end
+    #    if (nargin < 8), cgtol = 1e-8; end
+    #    if (nargin < 9), cgmaxiter = 200; end
+    # Nic: added these as optional parameters
+    
+    newtontol = lbtol
+    newtonmaxiter = 50
+    
+    #N = length(x0);
+    N = x0.size
+    
+    # starting point --- make sure that it is feasible
+    if largescale:
+      if np.linalg.norm(A(x0) - b) > epsilon or np.linalg.norm( np.dot(Aexact,x0) - bexact ) > 1e-15:
+        if verbose:
+          print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
+        #AAt = @(z) A(At(z));
+        AAt = lambda z: A(At(z))
+        # TODO: implement cgsolve
+        w,cgres,cgiter = cgsolve(AAt, b, cgtol, cgmaxiter, 0)
+        if (cgres > 1.0/2):
+          if verbose:
+            print 'A*At is ill-conditioned: cannot find starting point'
+          xp = x0.copy()
+          return xp
+        #end
+        x0 = At(w)
+      #end
+    else:
+      # Nic: also treat the starting point as infeasible if np.linalg.norm(np.dot(Aexact,x0) - bexact) > 1e-15
+      if np.linalg.norm( np.dot(A,x0) - b ) > epsilon or np.linalg.norm( np.dot(Aexact,x0) - bexact ) > 1e-15:
+        if verbose:
+          print 'Starting point infeasible; using x0 = At*inv(AAt)*y.'
+        
+        #Nic: stack A and Aexact, b and bexact, and use them instead of A and b
+        Abig = np.vstack((A,Aexact))
+        bbig = np.concatenate((b,bexact))
+        try:
+            w = scipy.linalg.solve(np.dot(Abig,Abig.T), bbig, sym_pos=True)
+            #w = np.linalg.solve(np.dot(A,A.T), b)
+            hcond = 1.0/np.linalg.cond(np.dot(Abig,Abig.T))
+        except scipy.linalg.LinAlgError:
+            if verbose:
+              print 'A*At is ill-conditioned: cannot find starting point'
+            xp = x0.copy()
+            return xp
+        if hcond < 1e-14:
+            if verbose:
+              print 'A*At is ill-conditioned: cannot find starting point'
+            xp = x0.copy()
+            return xp           
+        x0 = np.dot(Abig.T, w)        
+        #        try:
+        #            w = scipy.linalg.solve(np.dot(A,A.T), b, sym_pos=True)
+        #            #w = np.linalg.solve(np.dot(A,A.T), b)
+        #            hcond = 1.0/scipy.linalg.cond(np.dot(A,A.T))
+        #        except scipy.linalg.LinAlgError:
+        #            print 'A*At is ill-conditioned: cannot find starting point'
+        #            xp = x0.copy()
+        #            return xp
+        #        if hcond < 1e-14:
+        #            print 'A*At is ill-conditioned: cannot find starting point'
+        #            xp = x0.copy()
+        #            return xp           
+        #        #x0 = A'*w;
+        #        x0 = np.dot(A.T, w)
+      #end  
+    #end
+    x = x0.copy()
+    u = (0.95)*np.abs(x0) + (0.10)*np.abs(x0).max()
+    
+    #disp(sprintf('Original l1 norm = #.3f, original functional = #.3f', sum(abs(x0)), sum(u)));
+    if verbose:
+      print 'Original l1 norm = ',np.abs(x0).sum(),'original functional = ',u.sum()
+    
+    # choose initial value of tau so that the duality gap after the first
+    # step will be about the original norm
+    tau = max(((2*N+1.0)/np.abs(x0).sum()), 1)
+                                                                                                                              
+    lbiter = math.ceil((math.log(2*N+1)-math.log(lbtol)-math.log(tau))/math.log(mu))
+    #disp(sprintf('Number of log barrier iterations = #d\n', lbiter));
+    if verbose:
+      print 'Number of log barrier iterations = ',lbiter
+    
+    totaliter = 0
+    
+    # Added by Nic, to avoid a crash when lbiter == 0 (xp would otherwise be undefined)
+    if lbiter == 0:
+        xp = np.zeros(x0.size)
+    #end
+    
+    #for ii = 1:lbiter
+    for ii in np.arange(lbiter):
+    
+      xp,up,ntiter = l1qec_newton(x, u, A, At, b, epsilon, Aexact, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter, verbose)
+      totaliter = totaliter + ntiter
+      
+      #disp(sprintf('\nLog barrier iter = #d, l1 = #.3f, functional = #8.3f, tau = #8.3e, total newton iter = #d\n', ...
+      #  ii, sum(abs(xp)), sum(up), tau, totaliter));
+      if verbose:
+        print 'Log barrier iter = ',ii,', l1 = ',np.abs(xp).sum(),', functional = ',up.sum(),', tau = ',tau,', total newton iter = ',totaliter
+      x = xp.copy()
+      u = up.copy()
+     
+      tau = mu*tau
+      
+    #end
+    return xp
+                   
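
Note on the file above: the main difference from l1qc_newton() is how the Newton direction is computed; the equality constraint enters through a bordered (KKT) system, as in tveq_newton.m. A self-contained sketch of just that linear-algebra step, using random stand-in matrices (not the actual barrier Hessian):

import numpy as np
import scipy.linalg

np.random.seed(1)
N, K = 8, 3
B = np.random.randn(N, N)
H11p = np.dot(B, B.T) + N*np.eye(N)      # stand-in for the symmetric Hessian block
Aexact = np.random.randn(K, N)           # equality-constraint rows
w1p = np.random.randn(N)

afac = np.diag(H11p).max()               # same scaling as in l1qec_newton()
Hp = np.vstack((np.hstack((H11p, afac*Aexact.T)),
                np.hstack((afac*Aexact, np.zeros((K, K))))))
wp = np.concatenate((w1p, np.zeros(K)))
dxv = scipy.linalg.solve(Hp, wp)         # symmetric but indefinite, so no sym_pos
dx = dxv[:N]
print 'Aexact*dx ~ 0:', np.linalg.norm(np.dot(Aexact, dx))
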
--- a/pyCSalgos/SL0/SL0_approx.py	Tue Nov 15 15:11:56 2011 +0000
+++ b/pyCSalgos/SL0/SL0_approx.py	Thu Nov 17 17:29:54 2011 +0000
@@ -55,8 +55,63 @@
       sigma = sigma * sigma_decrease_factor
   
   return s
+
+# Direct approximate analysis-based version of SL0
+# Solves argmin_gamma ||gamma||_0 such that ||x - Aeps*gamma|| < eps AND ||Aexact*gamma|| = 0
+# Basically, instead of a single A, we now have an Aeps which allows up to eps error
+#  and an Aexact which requires exact orthogonality
+# It is assumed that the rows of Aexact are orthogonal to the rows of Aeps
+def SL0_approx_analysis(Aeps, Aexact, x, eps, sigma_min, sigma_decrease_factor=0.5, mu_0=2, L=3, Aeps_pinv=None, Aexact_pinv=None, true_s=None):
   
+  if Aeps_pinv is None:
+    Aeps_pinv = np.linalg.pinv(Aeps)
+  if Aexact_pinv is None:
+    Aexact_pinv = np.linalg.pinv(Aexact)
+    
+  if true_s is not None:
+      ShowProgress = True
+  else:
+      ShowProgress = False
   
+  # Initialization
+  #s = A\x;
+  s = np.dot(Aeps_pinv,x)
+  sigma = 2.0 * np.abs(s).max()
+  
+  # Main Loop
+  while sigma>sigma_min:
+      for i in np.arange(L):
+          delta = OurDelta(s,sigma)
+          s = s - mu_0*delta
+          # At this point, s no longer exactly satisfies x = A*s
+          # The original SL0 algorithm projects s onto {s | x = As} with
+          # s = s - np.dot(A_pinv,(np.dot(A,s)-x))   # Projection
+          #
+          # We want to project s onto {s | ||x - Aeps*s|| < eps AND ||Aexact*s|| = 0}
+          # First:   make s orthogonal to the rows of Aexact (so that ||Aexact*s|| = 0)
+          # Second:  move along the direction -Aeps_pinv*(Aeps*s - x), but only with a smaller step:
+          # This separation assumes that the rows of Aexact are orthogonal to the rows of Aeps
+          #
+          # 1. Make s orthogonal to Aexact:
+          #     s = s - Aexact_pinv * Aexact * s
+          s = s - np.dot(Aexact_pinv,(np.dot(Aexact,s)))
+          # 2. Move along the direction -Aeps_pinv*(Aeps*s - x), but only with a smaller step:
+          direction = np.dot(Aeps_pinv,(np.dot(Aeps,s)-x))
+          if (np.linalg.norm(np.dot(Aeps,direction)) >= eps):
+            s = s - (1.0 - eps/np.linalg.norm(np.dot(Aeps,direction))) * direction
+
+          #assert(np.linalg.norm(x - np.dot(A,s)) < eps + 1e-6)          
+      
+      if ShowProgress:
+          #fprintf('     sigma=#f, SNR=#f\n',sigma,estimate_SNR(s,true_s))
+          string = '     sigma=%f, SNR=%f' % (sigma, estimate_SNR(s,true_s))
+          print string
+      
+      sigma = sigma * sigma_decrease_factor
+  
+  return s
+
+ 
 ####################################################################
 #function delta=OurDelta(s,sigma)
 def OurDelta(s,sigma):
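
For the new SL0_approx_analysis() above, the only change with respect to SL0_approx() is the projection step inside the loop. A standalone sketch of that step, with synthetic Aeps/Aexact whose rows are orthonormal for simplicity (the code only assumes that the rows of Aexact are orthogonal to those of Aeps); afterwards s satisfies Aexact*s = 0 and lies on or inside the eps ball around x:

import numpy as np

np.random.seed(2)
N, m, k = 12, 4, 3
Q, _ = np.linalg.qr(np.random.randn(N, N))
Aeps, Aexact = Q[:, :m].T, Q[:, m:m+k].T                 # mutually orthogonal rows
x = np.random.randn(m)
eps = 0.1
Aeps_pinv = np.linalg.pinv(Aeps)
Aexact_pinv = np.linalg.pinv(Aexact)

s = np.random.randn(N)                                   # iterate after the gradient step
s = s - np.dot(Aexact_pinv, np.dot(Aexact, s))           # 1. enforce Aexact*s = 0
direction = np.dot(Aeps_pinv, np.dot(Aeps, s) - x)
if np.linalg.norm(np.dot(Aeps, direction)) >= eps:       # 2. partial step towards the eps ball
    s = s - (1.0 - eps/np.linalg.norm(np.dot(Aeps, direction))) * direction
print 'equality residual: ', np.linalg.norm(np.dot(Aexact, s))
print 'quadratic residual:', np.linalg.norm(np.dot(Aeps, s) - x)
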
--- a/scripts/ABSapprox.py	Tue Nov 15 15:11:56 2011 +0000
+++ b/scripts/ABSapprox.py	Thu Nov 17 17:29:54 2011 +0000
@@ -12,6 +12,8 @@
 
 import pyCSalgos
 import pyCSalgos.GAP.GAP
+import pyCSalgos.BP.l1qc
+import pyCSalgos.BP.l1qec
 import pyCSalgos.SL0.SL0_approx
 import pyCSalgos.OMP.omp_QR
 import pyCSalgos.RecomTST.RecommendedTST
@@ -27,6 +29,33 @@
                    "noise_level": epsilon}
   return pyCSalgos.GAP.GAP.GAP(y,M,M.T,Omega,Omega.T,gapparams,np.zeros(Omega.shape[1]))[0]
 
+def run_bp_analysis(y,M,Omega,epsilon):
+  
+  N,n = Omega.shape
+  D = np.linalg.pinv(Omega)
+  U,S,Vt = np.linalg.svd(D)
+  Aeps = np.dot(M,D)
+  Aexact = Vt[-(N-n):,:]
+  # We don't need any aggregate matrices anymore
+  
+  x0 = np.zeros(N)
+  return np.dot(D , pyCSalgos.BP.l1qec.l1qec_logbarrier(x0,Aeps,Aeps.T,y,epsilon,Aexact,Aexact.T,np.zeros(N-n)))
+
+def run_sl0_analysis(y,M,Omega,epsilon):
+  
+  N,n = Omega.shape
+  D = np.linalg.pinv(Omega)
+  U,S,Vt = np.linalg.svd(D)
+  Aeps = np.dot(M,D)
+  Aexact = Vt[-(N-n):,:]
+  # We don't need any aggregate matrices anymore
+  
+  sigmamin = 0.001
+  sigma_decrease_factor = 0.5
+  mu_0 = 2
+  L = 10
+  return np.dot(D , pyCSalgos.SL0.SL0_approx.SL0_approx_analysis(Aeps,Aexact,y,epsilon,sigmamin,sigma_decrease_factor,mu_0,L))
+
 def run_sl0(y,M,Omega,D,U,S,Vt,epsilon,lbd):
   
   N,n = Omega.shape
@@ -42,7 +71,7 @@
   mu_0 = 2
   L = 10
   return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
-
+  
 def run_bp(y,M,Omega,D,U,S,Vt,epsilon,lbd):
   
   N,n = Omega.shape
@@ -52,12 +81,9 @@
   aggDlower = Vt[-(N-n):,:]
   aggD = np.concatenate((aggDupper, lbd * aggDlower))
   aggy = np.concatenate((y, np.zeros(N-n)))
-  
-  sigmamin = 0.001
-  sigma_decrease_factor = 0.5
-  mu_0 = 2
-  L = 10
-  return pyCSalgos.SL0.SL0_approx.SL0_approx(aggD,aggy,epsilon,sigmamin,sigma_decrease_factor,mu_0,L)
+
+  x0 = np.zeros(N)
+  return pyCSalgos.BP.l1qc.l1qc_logbarrier(x0,aggD,aggD.T,aggy,epsilon)
 
 def run_ompeps(y,M,Omega,D,U,S,Vt,epsilon,lbd):
   
@@ -93,6 +119,8 @@
 #==========================
 gap = (run_gap, 'GAP')
 sl0 = (run_sl0, 'SL0a')
+sl0analysis = (run_sl0_analysis, 'SL0a2')
+bpanalysis = (run_bp_analysis, 'BPa2')
 bp  = (run_bp, 'BP')
 ompeps = (run_ompeps, 'OMPeps')
 tst = (run_tst, 'TST')
@@ -171,7 +199,37 @@
   saveplotexts = ('png','pdf','eps')
 
   return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\
-          doshowplot,dosaveplot,saveplotbase,saveplotexts          
+          doshowplot,dosaveplot,saveplotbase,saveplotexts    
+          
+# Standard parameters 3
+# Algorithms: GAP, SL0a2 and BPa2 (no lambda), plus SL0a and BP (with lambda)
+# d=50, sigma = 2, deltas and rhos only 3 x 3, lambdas = 0, 1e-4, 1e-2, 1, 100, 10000
+# Do save data, do save plots, don't show plots
+# Useful for short testing
+def std3():
+  # Define which algorithms to run
+  algosN = gap,sl0analysis,bpanalysis      # tuple of algorithms not depending on lambda
+  algosL = sl0,bp    # tuple of algorithms depending on lambda (our ABS approach)
+  
+  d = 50.0
+  sigma = 2.0
+  deltas = np.array([0.05, 0.45, 0.95])
+  rhos = np.array([0.05, 0.45, 0.95])
+  numvects = 10  # Number of vectors to generate
+  SNRdb = 20.    # This is norm(signal)/norm(noise), so power, not energy
+  # Values for lambda
+  #lambdas = [0 10.^linspace(-5, 4, 10)];
+  lambdas = np.array([0., 0.0001, 0.01, 1, 100, 10000])
+  
+  dosavedata = True
+  savedataname = 'approx_pt_std2.mat'
+  doshowplot = False
+  dosaveplot = True
+  saveplotbase = 'approx_pt_std2_'
+  saveplotexts = ('png','pdf','eps')
+
+  return algosN,algosL,d,sigma,deltas,rhos,lambdas,numvects,SNRdb,dosavedata,savedataname,\
+          doshowplot,dosaveplot,saveplotbase,saveplotexts
   
 #==========================
 # Interface run functions
@@ -198,14 +256,15 @@
 
   print "This is analysis recovery ABS approximation script by Nic"
   print "Running phase transition ( run_multi() )"
-  
-  if doparallel:
-    import multiprocessing
-    # Shared value holding the number of finished processes
-    # Add it as global of the module
-    import sys
-    currmodule = sys.modules[__name__]
-    currmodule.proccount = multiprocessing.Value('I', 0) # 'I' = unsigned int, see docs (multiprocessing, array)
+
+  # Not only for parallel  
+  #if doparallel:
+  import multiprocessing
+  # Shared value holding the number of finished processes
+  # Add it as global of the module
+  import sys
+  currmodule = sys.modules[__name__]
+  currmodule.proccount = multiprocessing.Value('I', 0) # 'I' = unsigned int, see docs (multiprocessing, array)
     
   if dosaveplot or doshowplot:
     try:
@@ -266,8 +325,9 @@
       print "    delta = ",delta," rho = ",rho
       jobparams.append((algosN,algosL, Omega,y,lambdas,realnoise,M,x0))
   
-  if doparallel:
-    currmodule.njobs = deltas.size * rhos.size  
+  # Not only for parallel
+  #if doparallel:
+  currmodule.njobs = deltas.size * rhos.size  
   print "End of parameters"
   
   # Run
@@ -278,7 +338,7 @@
     jobresults = pool.map(run_once_tuple, jobparams)
   else:
     for jobparam in jobparams:
-      jobresults.append(run_once(algosN,algosL,Omega,y,lambdas,realnoise,M,x0))
+      jobresults.append(run_once_tuple(jobparam))
 
   # Read results
   idx = 0
@@ -357,6 +417,7 @@
   nalgosN = len(algosN)  
   nalgosL = len(algosL)
   
+  
   xrec = dict()
   err = dict()
   relerr = dict()
@@ -376,7 +437,10 @@
   for iy in np.arange(y.shape[1]):
     for algofunc,strname in algosN:
       epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-      xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
+      try:
+        xrec[strname][:,iy] = algofunc(y[:,iy],M,Omega,epsilon)
+      except pyCSalgos.BP.l1qec.l1qecInputValueError as e:
+        print "Caught exception when running algorithm",strname," :",e.message
       err[strname][iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][:,iy])
       relerr[strname][iy] = err[strname][iy] / np.linalg.norm(x0[:,iy])
   for algofunc,strname in algosN:
@@ -389,7 +453,10 @@
       U,S,Vt = np.linalg.svd(D)
       for algofunc,strname in algosL:
         epsilon = 1.1 * np.linalg.norm(realnoise[:,iy])
-        gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
+        try:
+          gamma = algofunc(y[:,iy],M,Omega,D,U,S,Vt,epsilon,lbd)
+        except pyCSalgos.BP.l1qc.l1qcInputValueError as e:
+          print "Caught exception when running algorithm",strname," :",e.message
+          gamma = np.zeros(D.shape[1])  # fall back to a zero estimate so the error bookkeeping below still works
         xrec[strname][ilbd,:,iy] = np.dot(D,gamma)
         err[strname][ilbd,iy]    = np.linalg.norm(x0[:,iy] - xrec[strname][ilbd,:,iy])
         relerr[strname][ilbd,iy] = err[strname][ilbd,iy] / np.linalg.norm(x0[:,iy])
@@ -432,4 +499,4 @@
 if __name__ == "__main__":
   #import cProfile
   #cProfile.run('mainrun()', 'profile')    
-  run()
+  run(std3)
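
A note on the Aexact construction used in run_bp_analysis()/run_sl0_analysis() above: since D = pinv(Omega) has rank n, its trailing right singular vectors span null(D), which is the orthogonal complement of range(Omega); the equality constraint Aexact*gamma = 0 therefore restricts gamma to valid analysis representations Omega*x. A quick check with a random Omega (illustration only):

import numpy as np

np.random.seed(3)
n, N = 20, 30
Omega = np.random.randn(N, n)            # analysis operator, N > n
D = np.linalg.pinv(Omega)                # n x N synthesis operator
U, S, Vt = np.linalg.svd(D)
Aexact = Vt[-(N-n):, :]                  # right singular vectors for the zero singular values

print 'D * Aexact.T ~ 0:      ', np.linalg.norm(np.dot(D, Aexact.T))
gamma = np.dot(Omega, np.random.randn(n))
print 'Aexact * (Omega*x) ~ 0:', np.linalg.norm(np.dot(Aexact, gamma))
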